builtin-top.c 35.7 KB
Newer Older
1
/*
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
18
 */
19
#include "builtin.h"
20

21
#include "perf.h"
22

23
#include "util/color.h"
24
#include "util/evsel.h"
25 26
#include "util/session.h"
#include "util/symbol.h"
27
#include "util/thread.h"
28
#include "util/util.h"
29
#include <linux/rbtree.h>
30 31
#include "util/parse-options.h"
#include "util/parse-events.h"
32
#include "util/cpumap.h"
33
#include "util/xyarray.h"
34

35 36
#include "util/debug.h"

37 38
#include <assert.h>
#include <fcntl.h>
39

40
#include <stdio.h>
41 42
#include <termios.h>
#include <unistd.h>
43

44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

60
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
61

62
static bool			system_wide			=  false;
63

64
static int			default_interval		=      0;
65

66
static int			count_filter			=      5;
67
static int			print_entries;
68

69
static int			target_pid			=     -1;
70 71 72
static int			target_tid			=     -1;
static pid_t			*all_tids			=      NULL;
static int			thread_num			=      0;
73
static bool			inherit				=  false;
74
static int			nr_cpus				=      0;
75
static int			realtime_prio			=      0;
76
static bool			group				=  false;
77
static unsigned int		page_size;
78 79
static unsigned int		mmap_pages			=     16;
static int			freq				=   1000; /* 1 KHz */
80

81
static int			delay_secs			=      2;
82 83
static bool			zero                            =  false;
static bool			dump_symtab                     =  false;
84

85 86
static bool			hide_kernel_symbols		=  false;
static bool			hide_user_symbols		=  false;
87
static struct winsize		winsize;
88

89 90 91 92 93 94 95 96 97 98 99
/*
 * Source
 */

/* One line of "objdump -dS" output plus its per-event hit counts. */
struct source_line {
	u64			eip;	/* resolved IP for this line; 0 when the line carries no address */
	unsigned long		count[MAX_COUNTERS];	/* samples per event counter */
	char			*line;	/* the raw text, newline stripped */
	struct source_line	*next;	/* singly-linked list in objdump output order */
};

100
static const char		*sym_filter			=   NULL;
101
struct sym_entry		*sym_filter_entry		=   NULL;
102
struct sym_entry		*sym_filter_entry_sched		=   NULL;
103 104
static int			sym_pcnt_filter			=      5;
static int			sym_counter			=      0;
105
static struct perf_evsel	*sym_evsel			=   NULL;
106
static int			display_weighted		=     -1;
107
static const char		*cpu_list;
108

109 110 111 112
/*
 * Symbols
 */

113 114 115 116 117 118 119
/* Lazily built annotation data for one symbol (see parse_source()). */
struct sym_entry_source {
	struct source_line	*source;	/* first line of the symbol's own disassembly (set by lookup_sym_source) */
	struct source_line	*lines;		/* full objdump output list */
	struct source_line	**lines_tail;	/* append cursor used while parsing */
	pthread_mutex_t		lock;		/* protects the lists and their counts */
};

120
/*
 * Per-symbol accounting, stored in symbol private data (see
 * sym_entry__symbol() for the memory layout).
 */
struct sym_entry {
	struct rb_node		rb_node;	/* position in the sorted display tree */
	struct list_head	node;		/* membership in active_symbols */
	unsigned long		snap_count;	/* count snapshot taken at display time */
	double			weight;		/* ordering key, see sym_weight() */
	int			skip;		/* set for idle/boundary symbols we don't account */
	u16			name_len;
	u8			origin;		/* PERF_RECORD_MISC_* cpumode of last sample */
	struct map		*map;
	struct sym_entry_source	*src;		/* annotation data, NULL until parsed */
	unsigned long		count[0];	/* trailing per-event counters */
};

133 134 135 136
/*
 * Source functions
 */

137 138
static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
139
       return ((void *)self) + symbol_conf.priv_size;
140 141
}

142
/*
 * Determine the terminal size.  Precedence: the LINES/COLUMNS
 * environment overrides, then the TIOCGWINSZ ioctl on stdout, then a
 * hard-coded 80x25 fallback.
 */
void get_term_dimensions(struct winsize *ws)
{
	const char *rows_env = getenv("LINES");

	if (rows_env != NULL) {
		const char *cols_env;

		ws->ws_row = atoi(rows_env);
		cols_env = getenv("COLUMNS");
		if (cols_env != NULL) {
			ws->ws_col = atoi(cols_env);
			if (ws->ws_row != 0 && ws->ws_col != 0)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

164
static void update_print_entries(struct winsize *ws)
165
{
166 167
	print_entries = ws->ws_row;

168 169 170 171 172 173
	if (print_entries > 9)
		print_entries -= 9;
}

/* SIGWINCH: the terminal was resized — refresh the cached geometry. */
static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

178
static int parse_source(struct sym_entry *syme)
179 180
{
	struct symbol *sym;
181
	struct sym_entry_source *source;
182
	struct map *map;
183
	FILE *file;
184
	char command[PATH_MAX*2];
185 186
	const char *path;
	u64 len;
187 188

	if (!syme)
189 190 191 192 193 194 195 196 197 198
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->origin == DSO__ORIG_KERNEL)
		return -1;
199

200
	if (syme->src == NULL) {
201
		syme->src = zalloc(sizeof(*source));
202
		if (syme->src == NULL)
203
			return -1;
204 205 206 207 208 209 210
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
211 212
		goto out_assign;
	}
213
	path = map->dso->long_name;
214 215 216

	len = sym->end - sym->start;

217
	sprintf(command,
218 219 220
		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
221 222 223

	file = popen(command, "r");
	if (!file)
224
		return -1;
225

226 227
	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
228 229 230
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
231
		char *c, *sep;
232 233 234 235 236 237 238 239 240 241 242 243 244 245 246

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
247 248
		*source->lines_tail = src;
		source->lines_tail = &src->next;
249

250 251 252 253 254
		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
255 256 257 258
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
259
	pthread_mutex_unlock(&source->lock);
260
	return 0;
261 262 263 264 265 266 267
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

268
	line = syme->src->lines;
269 270 271 272 273 274 275 276 277 278 279 280 281 282
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

/*
 * Account one precise sample at @ip to the matching annotation line of
 * @syme, but only when @syme is the symbol currently being annotated.
 * Uses trylock so the sampling path never blocks on the display thread.
 */
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	/*
	 * Check for annotation data *before* touching the lock: the
	 * original code did pthread_mutex_trylock(&syme->src->lock) and
	 * only then tested syme->src for NULL, dereferencing a NULL
	 * pointer when the source had not been parsed yet.
	 */
	if (syme->src == NULL)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		/* list is in address order; we've gone past @ip */
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

304 305
#define PATTERN_LEN		(BITS_PER_LONG / 4 + 2)

306 307
/*
 * Find the objdump output line where the symbol's own disassembly
 * starts ("<addr> <name>:") and remember it in src->source so the
 * annotation display can begin there instead of at file scope.
 */
static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATTERN_LEN + 1];

	/* objdump prints the start address zero-padded to the word width */
	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}

/*
 * Print @count annotation lines starting at @queue, each with its
 * event count and its percentage of @total.
 */
static void show_lines(struct source_line *queue, int count, int total)
{
	struct source_line *cur = queue;
	int shown;

	for (shown = 0; shown < count; shown++) {
		float pcnt = 100.0*(float)cur->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", cur->count[sym_counter], pcnt, cur->line);
		cur = cur->next;
	}
}

#define TRACE_COUNT     3

/*
 * Annotation view: print the disassembly/source lines of the selected
 * symbol whose sample percentage is at least sym_pcnt_filter, keeping
 * up to TRACE_COUNT leading context lines before each hot line, and
 * decay (or zero) the line counts for the next refresh.
 */
static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;	/* pending context lines */
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	/* NOTE(review): assumes syme->src was set up by parse_source() — confirm */
	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	/* first pass: total samples on this symbol for the percentage base */
	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			/* hot line: flush it together with its queued context */
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			/* cold line: keep at most TRACE_COUNT context lines queued */
			line_queue = line_queue->next;
			line_queue_count--;
		}

		/* exponential decay (7/8) or hard zeroing of the counts */
		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}
398

399
/*
400
 * Symbols will be added here in event__process_sample and will get out
401 402 403
 * after decayed.
 */
static LIST_HEAD(active_symbols);
404
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
405 406 407 408 409 410

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	/* unweighted mode: the snapshot count alone orders symbols */
	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	/* counter == nr_counters-1 here; +1 avoids division by zero */
	weight /= (sym->count[counter] + 1);

	return weight;
}

425
static long			samples;
426
static long			kernel_samples, us_samples;
427
static long			exact_samples;
428
static long			guest_us_samples, guest_kernel_samples;
429 430
static const char		CONSOLE_CLEAR[] = "";

431
/* Add @syme to the active list; caller must hold active_symbols_lock. */
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

436 437 438 439 440 441 442
/* Drop @syme from the active list, taking the lock itself. */
static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

443 444 445 446 447 448 449 450 451 452
/*
 * Insert @se into @tree ordered by descending weight, so an in-order
 * walk (rb_first/rb_next) visits the hottest symbols first.
 */
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		/* heavier entries go left */
		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}
462 463 464

/*
 * Redraw one refresh interval: snapshot and decay the per-symbol
 * counts, sort the active symbols by weight into a temporary rbtree,
 * print the header and the symbol table — or hand off to the
 * annotation view when a symbol is selected.
 */
static void print_sym_table(void)
{
	int printed = 0, j;
	struct perf_evsel *counter;
	/* which counter's count is snapshotted for sorting */
	int snap = !display_weighted ? sym_counter : 0;
	/* NOTE(review): samples may be 0 on the first interval; these are
	 * float divisions, so they yield inf/nan rather than trapping */
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = kernel_samples/delay_secs;
	float us_samples_per_sec = (us_samples)/delay_secs;
	float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
	float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
	float esamples_percent = (100.0*exact_samples)/samples;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, dso_short_width = 0;
	const int win_width = winsize.ws_col - 1;

	/* reset interval accumulators for the next period */
	samples = us_samples = kernel_samples = exact_samples = 0;
	guest_kernel_samples = guest_us_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			/* decay (7/8) or zero the counts for the next interval */
			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	if (!perf_guest) {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
			"  exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
					 samples_per_sec)),
			esamples_percent);
	} else {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
			" guest kernel:%4.1f%% guest us:%4.1f%%"
			" exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
						guest_kernel_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
					   guest_us_samples_per_sec) /
					  samples_per_sec)),
			esamples_percent);
	}

	if (nr_counters == 1 || !display_weighted) {
		struct perf_evsel *first;
		first = list_entry(evsel_list.next, struct perf_evsel, node);
		printf("%Ld", first->attr.sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_evsel));
	else list_for_each_entry(counter, &evsel_list, node) {
		if (counter->idx)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else if (target_tid != -1)
		printf(" (target_tid: %d", target_tid);
	else
		printf(" (all");

	if (cpu_list)
		printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
	else {
		if (target_tid != -1)
			printf(")\n");
		else
			printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	/* annotation mode replaces the table entirely */
	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > dso_short_width)
			dso_short_width = syme->map->dso->short_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	/* shrink columns until the row fits the terminal (29 fixed cols) */
	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (nr_counters == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695
/*
 * Prompt for and read a non-negative integer from stdin into *target.
 * *target is left untouched on read failure or non-numeric input.
 */
static void prompt_integer(int *target, const char *msg)
{
	char *buf = NULL, *p;	/* getline() allocates; was malloc(0) */
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;	/* was a bare return, leaking buf */

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	/* digits only; anything else leaves *target unchanged */
	for (p = buf; *p; p++) {
		/* cast: isdigit() on a possibly negative char is UB */
		if (!isdigit((unsigned char)*p))
			goto out_free;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

/* Prompt for an integer and store it only if it is a valid percentage. */
static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
696
		pthread_mutex_lock(&syme->src->lock);
697 698
		__zero_source_counters(syme);
		*target = NULL;
699
		pthread_mutex_unlock(&syme->src->lock);
700 701 702 703 704 705 706 707 708 709 710 711 712 713 714
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
715
		struct symbol *sym = sym_entry__symbol(syme);
716 717 718 719 720 721 722 723

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
724
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
725 726 727 728 729 730 731 732 733
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

734
/* Print the interactive key bindings with their current values. */
static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	/* [E] and [w] only make sense with more than one event */
	if (nr_counters > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

/*
 * Return 1 when @c is a bound interactive key, 0 otherwise.
 * 'E' and 'w' are only bound when more than one event is counted.
 */
static int key_mapped(int c)
{
	static const char always_mapped[] = "defzqQKUFsS";

	if (c != '\0' && strchr(always_mapped, c) != NULL)
		return 1;

	if (c == 'E' || c == 'w')
		return nr_counters > 1 ? 1 : 0;

	return 0;
}

794
/*
 * Dispatch one interactive keypress.  An unmapped key first shows the
 * key help and reads a second key (with the terminal in raw mode);
 * still-unmapped input is ignored.
 */
static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		/* raw, non-blocking terminal while we wait for the key */
		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			/* 0 means "auto": size from the terminal and track SIGWINCH */
			if (print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (nr_counters > 1) {
				fprintf(stderr, "\nAvailable events:");

				list_for_each_entry(sym_evsel, &evsel_list, node)
					fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel));

				prompt_integer(&sym_counter, "Enter details event counter");

				/* fall back to the first event on a bad index */
				if (sym_counter >= nr_counters) {
					sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
					sym_counter = 0;
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel));
					sleep(1);
					break;
				}
				list_for_each_entry(sym_evsel, &evsel_list, node)
					if (sym_evsel->idx == sym_counter)
						break;
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				perf_session__fprintf_dsos(session, stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			/* bitwise NOT toggles between -1 (weighted) and 0 */
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = !zero;
			break;
		default:
			break;
	}
}

898
/*
 * Display thread: with the terminal in raw mode, redraw the symbol
 * table every delay_secs until a key is pressed, then handle the key
 * and start over.  Never returns.
 */
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	/*
	 * Keep refreshing while poll() times out (returns 0); stop on
	 * input (1) or error (-1).  The original spelled this as
	 * "!poll(...) == 1", which only works because ! binds tighter
	 * than == — say what we mean instead.
	 */
	do {
		print_sym_table();
	} while (poll(&stdin_poll, 1, delay_msecs) == 0);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}

930
/*
 * Tag samples to be skipped: idle routines that would otherwise
 * dominate the profile (matched by name in symbol_filter()).
 */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

944
/*
 * Per-symbol callback run while loading symbol tables: rejects linker
 * boundary symbols (return 1), initializes the sym_entry private data,
 * marks idle symbols to be skipped and schedules the initial
 * --sym-annotate target if it matches.
 */
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	/* drop section/module boundary markers — they are not code */
	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}

989
/*
 * Account one PERF_RECORD_SAMPLE: bump the per-cpumode counters,
 * resolve the sample IP to a symbol and update that symbol's counts,
 * putting it on the active list for the display thread.
 */
static void event__process_sample(const event_t *self,
				  struct sample_data *sample,
				  struct perf_session *session,
				  struct perf_evsel *evsel)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	/* pick the machine the sample belongs to, per cpumode */
	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++us_samples;
		if (hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++kernel_samples;
		if (hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++guest_kernel_samples;
		machine = perf_session__find_machine(session, self->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			self->ip.pid);
		return;
	}

	if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
		exact_samples++;

	if (event__preprocess_sample(self, session, &al, sample,
				     symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[evsel->idx]++;
		syme->origin = origin;
		record_precise_ip(syme, evsel->idx, ip);
		pthread_mutex_lock(&active_symbols_lock);
		/* insert only if not already on the active list */
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
	}
}

/* Per-counter mmap'ed ring buffer state. */
struct mmap_data {
	void			*base;	/* start of the mmap'ed area */
	int			mask;	/* buffer size - 1, for wrapping offsets */
	unsigned int		prev;	/* read position from the last pass */
};

1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113
/*
 * Allocate an ncpus x nthreads array of mmap_data in evsel->priv.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int perf_evsel__alloc_mmap_per_thread(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	evsel->priv = xyarray__new(ncpus, nthreads, sizeof(struct mmap_data));
	return evsel->priv != NULL ? 0 : -ENOMEM;
}

/* Release the mmap_data array allocated by perf_evsel__alloc_mmap_per_thread(). */
static void perf_evsel__free_mmap(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->priv);
	evsel->priv = NULL;
}

1114 1115
/*
 * Read the kernel's current write head of the ring buffer; the rmb()
 * orders the head load before any subsequent reads of buffer data.
 */
static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}

1125
static void perf_session__mmap_read_counter(struct perf_session *self,
1126 1127
					    struct perf_evsel *evsel,
					    int cpu, int thread_idx)
1128
{
1129 1130
	struct xyarray *mmap_array = evsel->priv;
	struct mmap_data *md = xyarray__entry(mmap_array, cpu, thread_idx);
1131 1132 1133
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
1134
	struct sample_data sample;
1135 1136 1137 1138
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
1139
	 * the writer will bite our tail and mess up the samples under us.
1140 1141 1142 1143 1144 1145 1146
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
1147
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

1160
		size_t size = event->header.size;
1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

1182
		event__parse_sample(event, self, &sample);
1183
		if (event->header.type == PERF_RECORD_SAMPLE)
1184
			event__process_sample(event, &sample, self, evsel);
1185
		else
1186
			event__process(event, &sample, self);
1187 1188 1189 1190 1191 1192
		old += size;
	}

	md->prev = old;
}

1193
static struct pollfd *event_array;
M
Mike Galbraith 已提交
1194

1195
static void perf_session__mmap_read(struct perf_session *self)
1196
{
1197 1198
	struct perf_evsel *counter;
	int i, thread_index;
1199 1200

	for (i = 0; i < nr_cpus; i++) {
1201
		list_for_each_entry(counter, &evsel_list, node) {
1202 1203 1204 1205
			for (thread_index = 0;
				thread_index < thread_num;
				thread_index++) {
				perf_session__mmap_read_counter(self,
1206
					counter, i, thread_index);
1207
			}
1208
		}
1209 1210 1211
	}
}

int nr_poll;	/* number of live entries in event_array */
int group_fd;	/* fd of the current CPU's group leader, -1 until the first counter opens */

1215
static void start_counter(int i, struct perf_evsel *evsel)
1216
{
1217 1218
	struct xyarray *mmap_array = evsel->priv;
	struct mmap_data *mm;
1219
	struct perf_event_attr *attr;
1220
	int cpu = -1;
1221
	int thread_index;
1222

1223
	if (target_tid == -1)
1224
		cpu = cpumap[i];
1225

1226
	attr = &evsel->attr;
1227 1228

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
1229 1230 1231 1232 1233 1234 1235

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

1236
	attr->inherit		= (cpu < 0) && inherit;
1237
	attr->mmap		= 1;
1238

1239
	for (thread_index = 0; thread_index < thread_num; thread_index++) {
1240
try_again:
1241
		FD(evsel, i, thread_index) = sys_perf_event_open(attr,
1242 1243
				all_tids[thread_index], cpu, group_fd, 0);

1244
		if (FD(evsel, i, thread_index) < 0) {
1245 1246 1247
			int err = errno;

			if (err == EPERM || err == EACCES)
1248 1249 1250
				die("Permission error - are you root?\n"
					"\t Consider tweaking"
					" /proc/sys/kernel/perf_event_paranoid.\n");
1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
1267
			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
1268
					FD(evsel, i, thread_index), strerror(err));
1269 1270 1271
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}
1272 1273
		assert(FD(evsel, i, thread_index) >= 0);
		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
1274 1275

		/*
1276
		 * First counter acts as the group leader:
1277
		 */
1278
		if (group && group_fd == -1)
1279
			group_fd = FD(evsel, i, thread_index);
1280

1281
		event_array[nr_poll].fd = FD(evsel, i, thread_index);
1282 1283 1284
		event_array[nr_poll].events = POLLIN;
		nr_poll++;

1285 1286 1287 1288 1289 1290
		mm = xyarray__entry(mmap_array, i, thread_index);
		mm->prev = 0;
		mm->mask = mmap_pages*page_size - 1;
		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
		if (mm->base == MAP_FAILED)
1291
			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
1292 1293 1294 1295 1296 1297
	}
}

static int __cmd_top(void)
{
	pthread_t thread;
1298 1299
	struct perf_evsel *counter;
	int i, ret;
1300
	/*
1301 1302
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
1303
	 */
1304
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
1305 1306
	if (session == NULL)
		return -ENOMEM;
1307

1308 1309
	if (target_tid != -1)
		event__synthesize_thread(target_tid, event__process, session);
1310
	else
1311
		event__synthesize_threads(event__process, session);
1312

1313 1314
	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
1315
		list_for_each_entry(counter, &evsel_list, node)
1316
			start_counter(i, counter);
1317 1318
	}

1319
	/* Wait for a minimal set of events before starting the snapshot */
1320
	poll(&event_array[0], nr_poll, 100);
1321

1322
	perf_session__mmap_read(session);
1323

1324
	if (pthread_create(&thread, NULL, display_thread, session)) {
1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
1340
		int hits = samples;
1341

1342
		perf_session__mmap_read(session);
1343

1344
		if (hits == samples)
1345 1346 1347 1348 1349
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}
1350 1351 1352 1353 1354 1355 1356 1357

/* Usage text printed by usage_with_options() on bad command lines. */
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
1358 1359
		     "event selector. use 'perf list' to list available events",
		     parse_events),
1360 1361 1362
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
1363 1364 1365
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "profile events on existing thread id"),
1366 1367
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
1368 1369
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		    "list of cpus to monitor"),
1370 1371
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
1372 1373
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
1374
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
1375 1376
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
M
Mike Galbraith 已提交
1377
	OPT_INTEGER('d', "delay", &delay_secs,
1378 1379 1380
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
1381
	OPT_INTEGER('f', "count-filter", &count_filter,
1382 1383 1384
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
1385 1386
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
1387
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
1388
		    "symbol to annotate"),
A
Anton Blanchard 已提交
1389
	OPT_BOOLEAN('z', "zero", &zero,
1390
		    "zero history across updates"),
1391
	OPT_INTEGER('F', "freq", &freq,
1392
		    "profile at this frequency"),
1393 1394
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
1395 1396
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
1397
	OPT_INCR('v', "verbose", &verbose,
1398
		    "be more verbose (show counter open errors, etc)"),
1399 1400 1401
	OPT_END()
};

1402
int cmd_top(int argc, const char **argv, const char *prefix __used)
1403
{
1404 1405
	struct perf_evsel *pos;
	int status = -ENOMEM;
1406 1407 1408 1409 1410 1411 1412

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434
	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(top_usage, options);
		}
	} else {
		all_tids=malloc(sizeof(pid_t));
		if (!all_tids)
			return -ENOMEM;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		return -ENOMEM;

1435
	/* CPU and PID are mutually exclusive */
1436
	if (target_tid > 0 && cpu_list) {
1437 1438
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
1439
		cpu_list = NULL;
1440 1441
	}

1442 1443 1444 1445
	if (!nr_counters && perf_evsel_list__create_default() < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}
1446

1447 1448 1449
	if (delay_secs < 1)
		delay_secs = 1;

1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461
	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

1462
	if (target_tid != -1)
1463
		nr_cpus = 1;
1464
	else
1465 1466 1467 1468
		nr_cpus = read_cpu_map(cpu_list);

	if (nr_cpus < 1)
		usage_with_options(top_usage, options);
1469

1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489
	list_for_each_entry(pos, &evsel_list, node) {
		if (perf_evsel__alloc_mmap_per_thread(pos, nr_cpus, thread_num) < 0 ||
		    perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0)
			goto out_free_fd;
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (pos->attr.sample_period)
			continue;

		pos->attr.sample_period = default_interval;
	}

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

1490
	get_term_dimensions(&winsize);
1491
	if (print_entries == 0) {
1492
		update_print_entries(&winsize);
1493 1494 1495
		signal(SIGWINCH, sig_winch_handler);
	}

1496 1497
	status = __cmd_top();
out_free_fd:
1498
	list_for_each_entry(pos, &evsel_list, node)
1499 1500 1501
		perf_evsel__free_mmap(pos);

	return status;
1502
}