/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/color.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static int			*fd[MAX_NR_CPUS][MAX_COUNTERS];

static bool			system_wide			=  false;

static int			default_interval		=      0;

static int			count_filter			=      5;
static int			print_entries;

static int			target_pid			=     -1;
static int			target_tid			=     -1;
static pid_t			*all_tids			=      NULL;
static int			thread_num			=      0;
static bool			inherit				=  false;
static int			profile_cpu			=     -1;
static int			nr_cpus				=      0;
static int			realtime_prio			=      0;
static bool			group				=  false;
static unsigned int		page_size;
static unsigned int		mmap_pages			=     16;
static int			freq				=   1000; /* 1 KHz */

static int			delay_secs			=      2;
static bool			zero                            =  false;
static bool			dump_symtab                     =  false;

static bool			hide_kernel_symbols		=  false;
static bool			hide_user_symbols		=  false;
static struct winsize		winsize;

/*
 * Source
 */

struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static const char		*sym_filter			=   NULL;
struct sym_entry		*sym_filter_entry		=   NULL;
struct sym_entry		*sym_filter_entry_sched		=   NULL;
static int			sym_pcnt_filter			=      5;
static int			sym_counter			=      0;
static int			display_weighted		=     -1;
static const char		*cpu_list;

/*
 * Symbols
 */

struct sym_entry_source {
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		lock;
};

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		snap_count;
	double			weight;
	int			skip;
	u16			name_len;
	u8			origin;
	struct map		*map;
	struct sym_entry_source	*src;
	unsigned long		count[0];
};

/*
 * Source functions
 */

static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
	return ((void *)self) + symbol_conf.priv_size;
}

void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

static void update_print_entries(struct winsize *ws)
{
	print_entries = ws->ws_row;

	if (print_entries > 9)
		print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

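/*
 * Run objdump over the symbol's address range and cache its annotated
 * disassembly, so that per-line event counts can be shown later.
 */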
static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->origin == DSO__ORIG_KERNEL)
		return -1;

	if (syme->src == NULL) {
		syme->src = zalloc(sizeof(*source));
		if (syme->src == NULL)
			return -1;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}
	path = map->dso->long_name;

	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return -1;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c, *sep;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
	return 0;
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->src->lines;
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

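/*
 * Credit one sample IP to the matching disassembly line of the symbol
 * currently being annotated.
 */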
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src == NULL || syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

#define PATTERN_LEN		(BITS_PER_LONG / 4 + 2)

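/*
 * Find the objdump output line that opens this symbol ("<address> <"),
 * which is where the annotation display starts.
 */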
static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATTERN_LEN + 1];

	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}

static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT     3

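/*
 * Print the annotated disassembly of the selected symbol, showing only
 * lines at or above the sym_pcnt_filter percentage.
 */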
static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}

/*
 * Symbols will be added here in event__process_sample and will be
 * removed once their counts have decayed away.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long			samples;
static long			kernel_samples, us_samples;
static long			exact_samples;
static long			guest_us_samples, guest_kernel_samples;
static const char		CONSOLE_CLEAR[] = "";

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

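/*
 * Sort the active symbols by weight into a temporary rbtree, decay
 * their counts and redraw one screenful of output.
 */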
static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = kernel_samples/delay_secs;
	float us_samples_per_sec = (us_samples)/delay_secs;
	float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
	float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
	float esamples_percent = (100.0*exact_samples)/samples;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, dso_short_width = 0;
	const int win_width = winsize.ws_col - 1;

	samples = us_samples = kernel_samples = exact_samples = 0;
	guest_kernel_samples = guest_us_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	if (!perf_guest) {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
			"  exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
					 samples_per_sec)),
			esamples_percent);
	} else {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
			" guest kernel:%4.1f%% guest us:%4.1f%%"
			" exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
						guest_kernel_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
					   guest_us_samples_per_sec) /
					  samples_per_sec)),
			esamples_percent);
	}

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else if (target_tid != -1)
		printf(" (target_tid: %d", target_tid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_tid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > dso_short_width)
			dso_short_width = syme->map->dso->short_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (nr_counters == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

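/* Read a decimal number from stdin into *target; non-numeric input is discarded. */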
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

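/* Read a symbol name from stdin and switch annotation to it, if it is active. */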
static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->src->lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->src->lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

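/* Show the interactive key bindings together with their current settings. */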
static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
		case 'w':
			return nr_counters > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

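/*
 * Act on one interactive command key; an unmapped key first brings up
 * the help menu and reads a second key in raw terminal mode.
 */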
static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			if (print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (nr_counters > 1) {
				int i;

				fprintf(stderr, "\nAvailable events:");
				for (i = 0; i < nr_counters; i++)
					fprintf(stderr, "\n\t%d %s", i, event_name(i));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
					sym_counter = 0;
					sleep(1);
				}
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				perf_session__fprintf_dsos(session, stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = !zero;
			break;
		default:
			break;
	}
}

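/*
 * Display loop: redraw the symbol table every delay_secs seconds and
 * pass any keypress arriving in between to handle_keypress().
 */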
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

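/*
 * Called for every symbol loaded: drop linker section markers, tag the
 * idle-loop symbols to be skipped and set up the private sym_entry.
 */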
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}

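/*
 * Count one PERF_RECORD_SAMPLE: classify it by CPU mode, resolve the
 * sample IP to a symbol and bump that symbol's per-counter count.
 */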
static void event__process_sample(const event_t *self,
				 struct perf_session *session, int counter)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct sample_data data;
	struct machine *machine;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++us_samples;
		if (hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++kernel_samples;
		if (hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++guest_kernel_samples;
		machine = perf_session__find_machine(session, self->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			self->ip.pid);
		return;
	}

	if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
		exact_samples++;

	if (event__preprocess_sample(self, session, &al, &data,
				     symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[counter]++;
		syme->origin = origin;
		record_precise_ip(syme, counter, ip);
		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
	}
}

static int event__process(event_t *event, struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, session);
		break;
	default:
		break;
	}

	return 0;
}

struct mmap_data {
	int			counter;
	void			*base;
	int			mask;
	unsigned int		prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}

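/*
 * Drain one ring buffer: walk the events between our last position and
 * the kernel's data_head, copying any event that wraps the buffer end.
 */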
static void perf_session__mmap_read_counter(struct perf_session *self,
					    struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, self, md->counter);
		else
			event__process(event, self);
		old += size;
	}

	md->prev = old;
}

static struct pollfd *event_array;
static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void perf_session__mmap_read(struct perf_session *self)
{
	int i, counter, thread_index;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			for (thread_index = 0;
				thread_index < thread_num;
				thread_index++) {
				perf_session__mmap_read_counter(self,
					&mmap_array[i][counter][thread_index]);
			}
	}
}

int nr_poll;
int group_fd;

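/*
 * Open the counter for every monitored thread on this CPU, falling back
 * from hardware cycles to the software cpu-clock event if necessary,
 * and mmap the resulting ring buffers.
 */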
static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;
	int thread_index;

	cpu = profile_cpu;
	if (target_tid == -1 && profile_cpu == -1)
		cpu = cpumap[i];

	attr = attrs + counter;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

	attr->inherit		= (cpu < 0) && inherit;
	attr->mmap		= 1;

	for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
		fd[i][counter][thread_index] = sys_perf_event_open(attr,
				all_tids[thread_index], cpu, group_fd, 0);

		if (fd[i][counter][thread_index] < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("No permission - are you root?\n");
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("perfcounter syscall returned with %d (%s)\n",
					fd[i][counter][thread_index], strerror(err));
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}
		assert(fd[i][counter][thread_index] >= 0);
		fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK);

		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = fd[i][counter][thread_index];

		event_array[nr_poll].fd = fd[i][counter][thread_index];
		event_array[nr_poll].events = POLLIN;
		nr_poll++;

		mmap_array[i][counter][thread_index].counter = counter;
		mmap_array[i][counter][thread_index].prev = 0;
		mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1;
		mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0);
		if (mmap_array[i][counter][thread_index].base == MAP_FAILED)
			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
	}
}

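/*
 * Top-level loop: synthesize the existing threads, start all counters,
 * spawn the display thread and keep draining the mmap buffers.
 */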
static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false);
	if (session == NULL)
		return -ENOMEM;

	if (target_tid != -1)
		event__synthesize_thread(target_tid, event__process, session);
	else
		event__synthesize_threads(event__process, session);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(&event_array[0], nr_poll, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, display_thread, session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		perf_session__mmap_read(session);

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter;
	int i,j;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(top_usage, options);
		}
	} else {
		all_tids=malloc(sizeof(pid_t));
		if (!all_tids)
			return -ENOMEM;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = malloc(sizeof(int)*thread_num);
			mmap_array[i][j] = zalloc(
				sizeof(struct mmap_data)*thread_num);
			if (!fd[i][j] || !mmap_array[i][j])
				return -ENOMEM;
		}
	}
	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		return -ENOMEM;

	/* CPU and PID are mutually exclusive */
	if (target_tid > 0 && cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		cpu_list = NULL;
	}

	if (!nr_counters)
		nr_counters = 1;

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	if (delay_secs < 1)
		delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	if (target_tid != -1)
		nr_cpus = 1;
	else
		nr_cpus = read_cpu_map(cpu_list);

	if (nr_cpus < 1)
		usage_with_options(top_usage, options);

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	return __cmd_top();
}