/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/color.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

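/*
 * One event file descriptor per (CPU, counter) pair, filled in by
 * start_counter(). The globals below hold the command line switches
 * that control what is profiled and how the display behaves.
 */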
static int			fd[MAX_NR_CPUS][MAX_COUNTERS];

static int			system_wide			=      0;

static int			default_interval		=      0;

static int			count_filter			=      5;
static int			print_entries;

static int			target_pid			=     -1;
static int			inherit				=      0;
static int			profile_cpu			=     -1;
static int			nr_cpus				=      0;
static unsigned int		realtime_prio			=      0;
static int			group				=      0;
static unsigned int		page_size;
static unsigned int		mmap_pages			=     16;
static int			freq				=   1000; /* 1 KHz */

static int			delay_secs			=      2;
static int			zero                            =      0;
static int			dump_symtab                     =      0;

static bool			hide_kernel_symbols		=  false;
static bool			hide_user_symbols		=  false;
static struct winsize		winsize;

/*
 * Source
 */

struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static char			*sym_filter			=   NULL;
struct sym_entry		*sym_filter_entry		=   NULL;
struct sym_entry		*sym_filter_entry_sched		=   NULL;
static int			sym_pcnt_filter			=      5;
static int			sym_counter			=      0;
static int			display_weighted		=     -1;

/*
 * Symbols
 */

struct sym_entry_source {
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		lock;
};

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		snap_count;
	double			weight;
	int			skip;
	u16			name_len;
	u8			origin;
	struct map		*map;
	struct sym_entry_source	*src;
	unsigned long		count[0];
};

/*
 * Source functions
 */

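/*
 * Each struct symbol is allocated with symbol_conf.priv_size bytes of
 * private data placed in front of it (sized in cmd_top()); that private
 * area is our struct sym_entry, so the symbol itself lives immediately
 * after it.
 */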
static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
	return ((void *)self) + symbol_conf.priv_size;
}

void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

static void update_print_entries(struct winsize *ws)
{
	print_entries = ws->ws_row;

	if (print_entries > 9)
		print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

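/*
 * Run objdump over the symbol's address range and cache the annotated
 * disassembly as a list of struct source_line, so record_precise_ip()
 * can attribute samples to individual lines.
 */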
static void parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return;

	if (syme->src == NULL) {
		syme->src = zalloc(sizeof(*source));
		if (syme->src == NULL)
			return;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}

	sym = sym_entry__symbol(syme);
	map = syme->map;
	path = map->dso->long_name;

	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c, *sep;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->src->lines;
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

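/*
 * Account a sample to the matching annotated line of the filtered
 * symbol. A trylock is used so the sample-processing path never blocks
 * if the display side currently holds the source lock.
 */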
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src == NULL || syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

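/*
 * Width of the address prefix objdump prints in front of the function
 * header line: BITS_PER_LONG/4 hex digits followed by " <".
 */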
#define PATTERN_LEN		(BITS_PER_LONG / 4 + 2)

static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATTERN_LEN + 1];

	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}

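/*
 * Print a run of annotated source lines with their event counts and the
 * share of the symbol's total samples each line accounts for.
 */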
static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT     3

static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}

/*
 * Symbols are added here in event__process_sample and are removed again
 * once their counts have decayed to zero.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long			samples;
static long			userspace_samples;
static long			exact_samples;
static const char		CONSOLE_CLEAR[] = "\e[H\e[2J";

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

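/*
 * Snapshot and sort the active symbols, print the summary header and the
 * top entries, and decay the per-symbol counts (multiply by 7/8) so that
 * old samples age out between refreshes.
 */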
static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float esamples_percent = (100.0*exact_samples)/samples;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, max_dso_width;
	const int win_width = winsize.ws_col - 1;

	samples = userspace_samples = exact_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%  exact: %4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)),
		esamples_percent);

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	max_dso_width = winsize.ws_col - sym_width - 29;
	if (dso_width > max_dso_width)
		dso_width = max_dso_width;
	putchar('\n');
	if (nr_counters == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);

		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->src->lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->src->lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel symbols.             \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
		case 'w':
			return nr_counters > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

static void handle_keypress(int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			if (print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (nr_counters > 1) {
				int i;

				fprintf(stderr, "\nAvailable events:");
				for (i = 0; i < nr_counters; i++)
					fprintf(stderr, "\n\t%d %s", i, event_name(i));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
					sym_counter = 0;
					sleep(1);
				}
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				dsos__fprintf(stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = ~zero;
			break;
		default:
			break;
	}
}

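/*
 * Display loop: switch the terminal to non-canonical, no-echo mode,
 * refresh the symbol table every delay_secs seconds and hand any
 * keypress to handle_keypress(), which may prompt for further input.
 */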
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	do {
		print_sym_table();
	} while (poll(&stdin_poll, 1, delay_msecs) == 0);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

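/*
 * Called for every symbol while symbol tables are loaded: initialize the
 * per-symbol private area, mark idle/bookkeeping symbols to be skipped
 * and remember the symbol requested via --sym-annotate for later
 * annotation.
 */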
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}

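/*
 * Account one sample: resolve the ip to a symbol, honour the kernel/user
 * hiding switches, bump the per-counter hit count and put the symbol on
 * the active list so print_sym_table() picks it up.
 */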
static void event__process_sample(const event_t *self,
				 struct perf_session *session, int counter)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++userspace_samples;
		if (hide_user_symbols)
			return;
		break;
	case PERF_RECORD_MISC_KERNEL:
		if (hide_kernel_symbols)
			return;
		break;
	default:
		return;
	}

	if (self->header.misc & PERF_RECORD_MISC_EXACT)
		exact_samples++;

	if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == session->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		parse_source(sym_filter_entry);
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[counter]++;
		syme->origin = origin;
		record_precise_ip(syme, counter, ip);
		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
	}
}

static int event__process(event_t *event, struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, session);
		break;
	default:
		break;
	}

	return 0;
}

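/*
 * Per-buffer ring state: data_head in the control page is advanced by
 * the kernel, prev remembers how far user space has already read.
 */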
struct mmap_data {
	int			counter;
	void			*base;
	int			mask;
	unsigned int		prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}

static void perf_session__mmap_read_counter(struct perf_session *self,
					    struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, self, md->counter);
		else
			event__process(event, self);
		old += size;
	}

	md->prev = old;
}

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void perf_session__mmap_read(struct perf_session *self)
{
	int i, counter;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			perf_session__mmap_read_counter(self, &mmap_array[i][counter]);
	}
}

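/*
 * start_counter() opens one event for one CPU (or the target pid),
 * falling back from hardware cycles to the software cpu-clock event when
 * no PMU support is available, mmaps its ring buffer and registers the
 * fd in event_array for polling. group_fd carries the group leader
 * across the per-CPU loop in __cmd_top().
 */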
int nr_poll;
int group_fd;

static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = cpumap[i];

	attr = attrs + counter;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

	attr->inherit		= (cpu < 0) && inherit;
	attr->mmap		= 1;

try_again:
	fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM || err == EACCES)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		printf("\n");
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		exit(-1);
	}
	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}

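/*
 * Top-level loop: synthesize events for already-running threads, open
 * and mmap all counters, start the display thread and then keep draining
 * the ring buffers, polling when no new samples arrived.
 */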
static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false);
	if (session == NULL)
		return -ENOMEM;

	if (target_pid != -1)
		event__synthesize_thread(target_pid, event__process, session);
	else
		event__synthesize_threads(event__process, session);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		perf_session__mmap_read(session);

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

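/*
 * Option parsing and setup: size the per-symbol private area, load
 * symbols, turn -c/-F into a sample period or frequency and hand off to
 * __cmd_top().
 */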
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	if (delay_secs < 1)
		delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;
	else
		nr_cpus = read_cpu_map();

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	return __cmd_top();
}