builtin-top.c 35.6 KB
Newer Older
1
/*
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
18
 */
19
#include "builtin.h"
20

21
#include "perf.h"
22

23
#include "util/color.h"
24
#include "util/evsel.h"
25 26
#include "util/session.h"
#include "util/symbol.h"
27
#include "util/thread.h"
28
#include "util/util.h"
29
#include <linux/rbtree.h>
30 31
#include "util/parse-options.h"
#include "util/parse-events.h"
32
#include "util/cpumap.h"
33
#include "util/xyarray.h"
34

35 36
#include "util/debug.h"

37 38
#include <assert.h>
#include <fcntl.h>
39

40
#include <stdio.h>
41 42
#include <termios.h>
#include <unistd.h>
43

44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

60
/* Per-(cpu, thread) event file descriptor stored in evsel->fd. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static bool			system_wide			=  false;

static int			default_interval		=      0;

/* Minimum snapshot count for a symbol to be displayed (see print_sym_table). */
static int			count_filter			=      5;
/* Number of symbol rows shown per refresh; derived from the terminal size. */
static int			print_entries;

static int			target_pid			=     -1;
static int			target_tid			=     -1;
static struct thread_map	*threads;
static bool			inherit				=  false;
static struct cpu_map		*cpus;
static int			realtime_prio			=      0;
static bool			group				=  false;
static unsigned int		page_size;
static unsigned int		mmap_pages			=     16;
static int			freq				=   1000; /* 1 KHz */

/* Seconds between display refreshes (tunable at runtime via the 'd' key). */
static int			delay_secs			=      2;
/* When set, counters are zeroed each refresh instead of decayed. */
static bool			zero                            =  false;
static bool			dump_symtab                     =  false;

/* Toggled at runtime with the 'K' / 'U' keys. */
static bool			hide_kernel_symbols		=  false;
static bool			hide_user_symbols		=  false;
/* Current terminal dimensions, refreshed by sig_winch_handler(). */
static struct winsize		winsize;
87

88 89 90 91 92 93 94 95 96 97 98
/*
 * Source
 */

/* One line of objdump -dS output for an annotated symbol. */
struct source_line {
	u64			eip;			/* ip of the line; 0 when the line carries no address */
	unsigned long		count[MAX_COUNTERS];	/* per-event hit counts (see record_precise_ip) */
	char			*line;			/* text with trailing newline stripped */
	struct source_line	*next;			/* singly-linked, in listing order */
};

/* Symbol name requested for annotation; consumed (NULLed) by symbol_filter(). */
static const char		*sym_filter			=   NULL;
/* Symbol currently being annotated, if any. */
struct sym_entry		*sym_filter_entry		=   NULL;
/* Annotation scheduled by symbol_filter(), installed in event__process_sample(). */
struct sym_entry		*sym_filter_entry_sched		=   NULL;
/* Minimum percentage for an annotated line to be printed. */
static int			sym_pcnt_filter			=      5;
/* Index of the event whose counts drive the annotation display. */
static int			sym_counter			=      0;
static struct perf_evsel	*sym_evsel			=   NULL;
/* Non-zero: weight symbols by all counters; toggled with the 'w' key. */
static int			display_weighted		=     -1;
static const char		*cpu_list;
107

108 109 110 111
/*
 * Symbols
 */

/* Lazily-allocated annotation state hanging off a sym_entry. */
struct sym_entry_source {
	struct source_line	*source;	/* first line belonging to the symbol proper */
	struct source_line	*lines;		/* full objdump listing */
	struct source_line	**lines_tail;	/* append cursor used while parsing */
	pthread_mutex_t		lock;		/* protects the lists above */
};

/*
 * Per-symbol profiling state, stored in the symbol's private area
 * (see sym_entry__symbol / symbol__priv).
 */
struct sym_entry {
	struct rb_node		rb_node;	/* node in the per-refresh display tree */
	struct list_head	node;		/* membership on active_symbols */
	unsigned long		snap_count;	/* snapshot of count[] taken at display time */
	double			weight;		/* ordering weight, see sym_weight() */
	int			skip;		/* non-zero: samples are not recorded (idle symbols) */
	u16			name_len;
	u8			origin;		/* PERF_RECORD_MISC_* cpumode of the last sample */
	struct map		*map;
	struct sym_entry_source	*src;		/* annotation state, NULL until first needed */
	unsigned long		count[0];	/* per-event counters, flexible trailing array */
};

132 133 134 135
/*
 * Source functions
 */

136 137
static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
138
       return ((void *)self) + symbol_conf.priv_size;
139 140
}

141
/*
 * Determine the terminal size: honour LINES/COLUMNS from the environment
 * first, then fall back to TIOCGWINSZ on stdout, then to a hardcoded
 * 25x80 if all else fails.
 */
void get_term_dimensions(struct winsize *ws)
{
	const char *rows = getenv("LINES");

	if (rows != NULL) {
		const char *cols;

		ws->ws_row = atoi(rows);
		cols = getenv("COLUMNS");
		if (cols != NULL) {
			ws->ws_col = atoi(cols);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	/* note: a successful ioctl overwrites the whole structure */
	if (ioctl(1, TIOCGWINSZ, ws) == 0 && ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

163
static void update_print_entries(struct winsize *ws)
164
{
165 166
	print_entries = ws->ws_row;

167 168 169 170 171 172
	if (print_entries > 9)
		print_entries -= 9;
}

/* SIGWINCH handler: re-read the terminal size and resize the display. */
static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

177
/*
 * Build the annotated source listing for @syme by running objdump over
 * the symbol's address range and parsing the output line by line.
 * On success the symbol is installed as sym_filter_entry.
 * Returns 0 on success, -1 when the symbol cannot be annotated.
 */
static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->origin == DSO__ORIG_KERNEL)
		return -1;

	/* first annotation of this symbol: allocate its annotation state */
	if (syme->src == NULL) {
		syme->src = zalloc(sizeof(*source));
		if (syme->src == NULL)
			return -1;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	/* listing already parsed earlier: just re-select the symbol */
	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}
	path = map->dso->long_name;

	/* NOTE(review): len is computed but never used below — dead store? */
	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return -1;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c, *sep;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		/*
		 * NOTE(review): when getline fails (EOF) the just-allocated
		 * src is not freed and not linked — one-node leak per parse.
		 */
		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		/* lines starting with "<hex>:" carry an instruction address */
		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
	return 0;
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

267
	line = syme->src->lines;
268 269 270 271 272 273 274 275 276 277 278 279 280 281
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

/*
 * Account one sample at @ip against the per-source-line counters of the
 * currently annotated symbol.  Non-blocking: if the annotation lock is
 * contended, the sample is simply dropped.
 */
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	/*
	 * Fix: validate syme->src BEFORE taking syme->src->lock.  The old
	 * code dereferenced src inside the trylock call and only tested it
	 * for NULL afterwards, crashing when annotation state had not been
	 * allocated yet.
	 */
	if (syme->src == NULL)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src->source == NULL)
		goto out_unlock;

	/* lines are kept in address order, so we can stop early */
	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

303 304
#define PATTERN_LEN		(BITS_PER_LONG / 4 + 2)

305 306
static void lookup_sym_source(struct sym_entry *syme)
{
307
	struct symbol *symbol = sym_entry__symbol(syme);
308
	struct source_line *line;
309
	char pattern[PATTERN_LEN + 1];
310

311 312
	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));
313

314 315
	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
316
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
317
			syme->src->source = line;
318 319 320
			break;
		}
	}
321
	pthread_mutex_unlock(&syme->src->lock);
322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
}

/*
 * Print @count queued source lines with each line's share of @total
 * events for the currently selected counter.
 */
static void show_lines(struct source_line *queue, int count, int total)
{
	struct source_line *cur = queue;
	int i;

	for (i = 0; i < count; i++, cur = cur->next) {
		float pcnt = 100.0 * (float)cur->count[sym_counter] / (float)total;

		printf("%8li %4.1f%%\t%s\n", cur->count[sym_counter], pcnt, cur->line);
	}
}

/* number of context lines kept queued before a hot line */
#define TRACE_COUNT     3

/*
 * Print the annotated source view for the selected symbol: totals are
 * computed first, then lines at or above sym_pcnt_filter are shown with
 * up to TRACE_COUNT lines of leading context.  Counters are decayed
 * (or zeroed) as they are displayed.
 */
static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	/* lazily locate the symbol's first listing line */
	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);

	/* first pass: total event count for percentage computation */
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	/* second pass: queue context lines and flush on each hot line */
	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;	/* over the screen budget: just count */
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			/* keep the context window at TRACE_COUNT lines */
			line_queue = line_queue->next;
			line_queue_count--;
		}

		/* decay (or zero) this line's counter for the next refresh */
		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}
397

398
/*
 * Symbols will be added here in event__process_sample and will get out
 * after decayed.
 */
static LIST_HEAD(active_symbols);
/* guards membership of active_symbols (insert/remove/iteration start) */
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
404 405 406 407 408 409

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	/* unweighted mode: order purely by the snapshot count */
	if (!display_weighted)
		return weight;

	/* multiply in every middle counter... */
	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	/*
	 * ...and divide by the last one.  NOTE(review): when nr_counters == 1
	 * the loop does not run and this reads count[1], one past the single
	 * allocated counter — confirm callers guard against that case.
	 */
	weight /= (sym->count[counter] + 1);

	return weight;
}

424
/* per-refresh sample tallies, reset at the top of print_sym_table() */
static long			samples;
static long			kernel_samples, us_samples;
static long			exact_samples;
static long			guest_us_samples, guest_kernel_samples;
/*
 * ANSI escape sequence: cursor home + clear screen.  The non-printable
 * ESC (0x1b) bytes had been lost, leaving an empty string that never
 * cleared the display between refreshes.
 */
static const char		CONSOLE_CLEAR[] = "\033[H\033[2J";

430
/* add a symbol to the active list; caller must hold active_symbols_lock */
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

/* unlink a symbol from the active list (takes the lock itself) */
static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

442 443 444 445 446 447 448 449 450 451
/*
 * Insert @se into the display tree ordered by descending weight:
 * heavier entries sort to the left so an in-order walk yields the
 * hottest symbols first.
 */
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **link = &tree->rb_node;
	struct rb_node *parent = NULL;

	while (*link != NULL) {
		struct sym_entry *entry;

		parent = *link;
		entry = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > entry->weight)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, link);
	rb_insert_color(&se->rb_node, tree);
}
461 462 463

/*
 * Render one full refresh: compute rates from the per-interval tallies,
 * snapshot and decay the active symbols into a weight-ordered rb-tree,
 * print the header, then the hottest symbols (or the annotation view if
 * a symbol is selected).
 */
static void print_sym_table(void)
{
	int printed = 0, j;
	struct perf_evsel *counter;
	/* which counter the snapshot is taken from in unweighted mode */
	int snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = kernel_samples/delay_secs;
	float us_samples_per_sec = (us_samples)/delay_secs;
	float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
	float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
	/* NOTE(review): division by zero (NaN) when no samples arrived — confirm harmless for display */
	float esamples_percent = (100.0*exact_samples)/samples;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, dso_short_width = 0;
	const int win_width = winsize.ws_col - 1;

	/* reset the interval tallies for the next period */
	samples = us_samples = kernel_samples = exact_samples = 0;
	guest_kernel_samples = guest_us_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			/* drop symbols the user asked to hide */
			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			/* exponential decay (or hard zero) of the raw counters */
			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	if (!perf_guest) {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
			"  exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
					 samples_per_sec)),
			esamples_percent);
	} else {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
			" guest kernel:%4.1f%% guest us:%4.1f%%"
			" exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
						guest_kernel_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
					   guest_us_samples_per_sec) /
					  samples_per_sec)),
			esamples_percent);
	}

	/* show the sampling period (or frequency) of the first event */
	if (nr_counters == 1 || !display_weighted) {
		struct perf_evsel *first;
		first = list_entry(evsel_list.next, struct perf_evsel, node);
		printf("%Ld", first->attr.sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_evsel));
	else list_for_each_entry(counter, &evsel_list, node) {
		if (counter->idx)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else if (target_tid != -1)
		printf(" (target_tid: %d", target_tid);
	else
		printf(" (all");

	if (cpu_list)
		printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list);
	else {
		if (target_tid != -1)
			printf(")\n");
		else
			printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : "");
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	/* annotation mode replaces the symbol table entirely */
	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > dso_short_width)
			dso_short_width = syme->map->dso->short_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	/* shrink columns until the row fits the terminal */
	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (nr_counters == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694
/*
 * Read a line from stdin and, if it is entirely digits, store its value
 * in *target.  On read failure or non-numeric input *target is left
 * unchanged.
 */
static void prompt_integer(int *target, const char *msg)
{
	char *buf = NULL, *p;	/* NULL lets getline() allocate (was malloc(0)) */
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	/* fix: the old code returned here without freeing buf (leak) */
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	for (p = buf; *p; p++) {
		/* cast: isdigit() on a negative char value is undefined */
		if (!isdigit((unsigned char)*p))
			goto out_free;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

/* Prompt for an integer and accept it only when it is a valid percentage. */
static void prompt_percent(int *target, const char *msg)
{
	int val = 0;

	prompt_integer(&val, msg);
	if (val >= 0 && val <= 100)
		*target = val;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
695
		pthread_mutex_lock(&syme->src->lock);
696 697
		__zero_source_counters(syme);
		*target = NULL;
698
		pthread_mutex_unlock(&syme->src->lock);
699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
714
		struct symbol *sym = sym_entry__symbol(syme);
715 716 717 718 719 720 721 722

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
723
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
724 725 726 727 728 729 730 731 732
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

733
static void print_mapped_keys(void)
734
{
735 736 737
	char *name = NULL;

	if (sym_filter_entry) {
738
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
739 740 741 742 743 744 745 746
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (nr_counters > 1)
747
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_evsel));
748 749 750

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

751 752 753
	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");
754 755 756 757

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

758
	fprintf(stdout,
759
		"\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
760 761 762 763
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
764
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
765 766 767 768 769 770 771 772 773 774 775 776
	fprintf(stdout, "\t[qQ]    quit.\n");
}

/*
 * Is @c a key the interactive loop understands?  'E' and 'w' are only
 * meaningful when more than one event counter is active.
 */
static int key_mapped(int c)
{
	static const char always_mapped[] = "defzqQKUFsS";

	if (c != '\0' && strchr(always_mapped, c) != NULL)
		return 1;

	if (c == 'E' || c == 'w')
		return nr_counters > 1 ? 1 : 0;

	return 0;
}
}

793
/*
 * Act on one interactive keypress.  An unmapped key first shows the help
 * and reads a second key (with the terminal in raw, non-echoing mode);
 * if that one is unmapped too, the input is ignored.
 */
static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		/* switch stdin to raw, non-blocking, no-echo mode */
		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			/* clamp: a zero delay would busy-loop the display */
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			if (print_entries == 0) {
				/* 0 means auto-size: track the terminal again */
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (nr_counters > 1) {
				fprintf(stderr, "\nAvailable events:");

				list_for_each_entry(sym_evsel, &evsel_list, node)
					fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					/* out of range: fall back to the first event */
					sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
					sym_counter = 0;
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel));
					sleep(1);
					break;
				}
				/* resolve the chosen index to its evsel */
				list_for_each_entry(sym_evsel, &evsel_list, node)
					if (sym_evsel->idx == sym_counter)
						break;
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				perf_session__fprintf_dsos(session, stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				/* clear the selection before zeroing its counters */
				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			/* bitwise NOT flips between the two sentinel values -1 and 0 */
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = !zero;
			break;
		default:
			break;
	}
}

897
/*
 * Display thread: refresh the symbol table every delay_secs until a key
 * is pressed (or poll fails, e.g. EINTR from SIGWINCH), then handle the
 * keypress and start over.  Runs forever; never returns.
 */
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	/* raw, non-blocking, no-echo terminal mode for single-key input */
	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;	/* re-read: 'd' may have changed it */
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	/*
	 * Fix: the old condition was "!poll(...) == 1", which relies on `!`
	 * binding before `==`.  It is exactly equivalent to comparing the
	 * poll() result against 0 (timeout), written unambiguously here.
	 */
	do {
		print_sym_table();
	} while (poll(&stdin_poll, 1, delay_msecs) == 0);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;	/* not reached; keeps the pthread signature happy */
}

929
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL		/* sentinel for the lookup loop in symbol_filter() */
};

943
static int symbol_filter(struct map *map, struct symbol *sym)
944
{
945 946
	struct sym_entry *syme;
	const char *name = sym->name;
947
	int i;
948

949 950 951 952 953 954 955
	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

956 957 958 959 960 961 962
	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
963 964
		return 1;

965
	syme = symbol__priv(sym);
966
	syme->map = map;
967
	syme->src = NULL;
968 969 970 971 972 973

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}
974

975 976 977 978 979 980
	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}
981

982 983 984
	if (!syme->skip)
		syme->name_len = strlen(sym->name);

985 986 987
	return 0;
}

988
/*
 * Account one PERF_RECORD_SAMPLE: bump the per-cpumode tallies, resolve
 * the sample address to a symbol, and update that symbol's counters and
 * the active list.  Exits the whole tool when the configured vmlinux
 * turns out to be unusable.
 */
static void event__process_sample(const event_t *self,
				  struct sample_data *sample,
				  struct perf_session *session,
				  struct perf_evsel *evsel)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	/* tally by cpumode and pick the machine to resolve symbols against */
	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++us_samples;
		if (hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++kernel_samples;
		if (hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++guest_kernel_samples;
		machine = perf_session__find_machine(session, self->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			self->ip.pid);
		return;
	}

	if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
		exact_samples++;

	if (event__preprocess_sample(self, session, &al, sample,
				     symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[evsel->idx]++;
		syme->origin = origin;
		record_precise_ip(syme, evsel->idx, ip);
		pthread_mutex_lock(&active_symbols_lock);
		/* insert only if not already linked on the active list */
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
	}
}

/* Reader state for one mmap'ed perf ring buffer. */
struct mmap_data {
	void			*base;	/* mapped control page + data area */
	int			mask;	/* data size - 1, used to wrap offsets */
	unsigned int		prev;	/* position of the last record we consumed */
};

/* Allocate one struct mmap_data per (cpu, thread), parked in evsel->priv. */
static int perf_evsel__alloc_mmap_per_thread(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	evsel->priv = xyarray__new(ncpus, nthreads, sizeof(struct mmap_data));
	return evsel->priv != NULL ? 0 : -ENOMEM;
}

/* Release the per-(cpu, thread) mmap_data array. */
static void perf_evsel__free_mmap(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->priv);
	evsel->priv = NULL;
}

1113 1114
/*
 * Read the kernel's current write offset in the ring buffer.
 * NOTE(review): data_head goes through a signed int here before being
 * returned as unsigned — looks like a truncation on 64-bit buffers;
 * confirm against the perf mmap ABI.
 */
static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();	/* pairs with the kernel's barrier before publishing data_head */

	return head;
}

1124
/*
 * Drain one counter's mmap ring buffer for a given (cpu, thread) slot:
 * walk every event between our saved read position and the kernel's
 * current head, dispatching PERF_RECORD_SAMPLE events to
 * event__process_sample() and everything else to event__process().
 */
static void perf_session__mmap_read_counter(struct perf_session *self,
					    struct perf_evsel *evsel,
					    int cpu, int thread_idx)
{
	struct xyarray *mmap_array = evsel->priv;
	struct mmap_data *md = xyarray__entry(mmap_array, cpu, thread_idx);
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	/* Event payload starts one page past base (after the control page). */
	unsigned char *data = md->base + page_size;
	struct sample_data sample;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			/* Reassemble the wrapped event into event_copy. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		event__parse_sample(event, self, &sample);
		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, &sample, self, evsel);
		else
			event__process(event, &sample, self);
		old += size;
	}

	/* Remember how far we got for the next drain. */
	md->prev = old;
}

1192
static struct pollfd *event_array;
static void perf_session__mmap_read(struct perf_session *self)
1195
{
1196 1197
	struct perf_evsel *counter;
	int i, thread_index;
1198

1199
	for (i = 0; i < cpus->nr; i++) {
1200
		list_for_each_entry(counter, &evsel_list, node) {
1201
			for (thread_index = 0;
1202
				thread_index < threads->nr;
1203 1204
				thread_index++) {
				perf_session__mmap_read_counter(self,
1205
					counter, i, thread_index);
1206
			}
1207
		}
1208 1209 1210
	}
}

1211 1212 1213
int nr_poll;	/* number of valid entries in event_array */
int group_fd;	/* fd of the current group leader; -1 means "none yet" */

1214
/*
 * Open and mmap one counter on cpu slot 'i' for every monitored thread.
 * Fills in the event's sampling attributes, falls back from hw cycles
 * to the sw cpu-clock on PMU-less machines, registers each fd in
 * event_array for polling and mmaps its ring buffer.  Dies on any
 * unrecoverable error (this is a top-level command, not a library).
 */
static void start_counter(int i, struct perf_evsel *evsel)
{
	struct xyarray *mmap_array = evsel->priv;
	struct mmap_data *mm;
	struct perf_event_attr *attr;
	int cpu = -1;
	int thread_index;

	/* Per-cpu mode unless we were asked to follow a specific tid. */
	if (target_tid == -1)
		cpu = cpus->map[i];

	attr = &evsel->attr;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

	/* inherit only makes sense when following tasks, not cpus */
	attr->inherit		= (cpu < 0) && inherit;
	attr->mmap		= 1;

	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
try_again:
		FD(evsel, i, thread_index) = sys_perf_event_open(attr,
				threads->map[thread_index], cpu, group_fd, 0);

		if (FD(evsel, i, thread_index) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
					"\t Consider tweaking"
					" /proc/sys/kernel/perf_event_paranoid.\n");
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
					FD(evsel, i, thread_index), strerror(err));
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			/* NOTE(review): die() already exits; this is unreachable. */
			exit(-1);
		}
		assert(FD(evsel, i, thread_index) >= 0);
		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);

		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = FD(evsel, i, thread_index);

		/* Register the fd so the main loop can poll() on it. */
		event_array[nr_poll].fd = FD(evsel, i, thread_index);
		event_array[nr_poll].events = POLLIN;
		nr_poll++;

		/* Map control page + mmap_pages of data, read-only. */
		mm = xyarray__entry(mmap_array, i, thread_index);
		mm->prev = 0;
		mm->mask = mmap_pages*page_size - 1;
		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
		if (mm->base == MAP_FAILED)
			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
	}
}

/*
 * Core of 'perf top': create the session, synthesize pre-existing
 * threads, open/mmap all counters, spawn the display thread, then
 * drain the ring buffers forever.  Only returns on setup failure.
 */
static int __cmd_top(void)
{
	pthread_t thread;
	struct perf_evsel *counter;
	int i, ret;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (session == NULL)
		return -ENOMEM;

	/* Synthesize events for threads that existed before we started. */
	if (target_tid != -1)
		event__synthesize_thread(target_tid, event__process, session);
	else
		event__synthesize_threads(event__process, session);

	for (i = 0; i < cpus->nr; i++) {
		group_fd = -1;	/* fresh group leader per cpu */
		list_for_each_entry(counter, &evsel_list, node)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(&event_array[0], nr_poll, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, display_thread, session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		perf_session__mmap_read(session);

		/* Only block in poll() if the last drain found nothing new. */
		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	/* NOTE(review): unreachable — the loop above never exits. */
	return 0;
}
1349 1350 1351 1352 1353 1354 1355 1356

/* Usage string shown by -h and by usage_with_options() on bad input. */
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

/* Command line options for 'perf top', parsed by parse_options(). */
static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

1401
/*
 * Entry point for 'perf top': parse options, resolve the thread/cpu
 * maps, allocate per-event fd and mmap arrays, initialize symbols and
 * the terminal, then run __cmd_top().
 */
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* -p implies monitoring that process's main thread. */
	if (target_pid != -1)
		target_tid = target_pid;

	threads = thread_map__new(target_pid, target_tid);
	if (threads == NULL) {
		pr_err("Problems finding threads of monitor\n");
		usage_with_options(top_usage, options);
	}

	/* One pollfd slot per possible (cpu, counter, thread) fd. */
	event_array = malloc((sizeof(struct pollfd) *
			      MAX_NR_CPUS * MAX_COUNTERS * threads->nr));
	if (!event_array)
		return -ENOMEM;

	/* CPU and PID are mutually exclusive */
	if (target_tid > 0 && cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		cpu_list = NULL;
	}

	/* NOTE(review): event_array/threads leak on the error returns
	 * below — tolerable for a short-lived command, but worth noting. */
	if (!nr_counters && perf_evsel_list__create_default() < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}

	if (delay_secs < 1)
		delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	if (target_tid != -1)
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(cpu_list);

	if (cpus == NULL)
		usage_with_options(top_usage, options);

	list_for_each_entry(pos, &evsel_list, node) {
		if (perf_evsel__alloc_mmap_per_thread(pos, cpus->nr, threads->nr) < 0 ||
		    perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
			goto out_free_fd;
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (pos->attr.sample_period)
			continue;

		pos->attr.sample_period = default_interval;
	}

	/* Reserve room after each symbol for per-counter sample counts. */
	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		/* Size the display to the terminal and track resizes. */
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	status = __cmd_top();
out_free_fd:
	list_for_each_entry(pos, &evsel_list, node)
		perf_evsel__free_mmap(pos);

	return status;
}