/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

struct perf_evlist		*evsel_list;

static bool			system_wide			=  false;

static int			default_interval		=      0;

static int			count_filter			=      5;
static int			print_entries;

static int			target_pid			=     -1;
static int			target_tid			=     -1;
static struct thread_map	*threads;
static bool			inherit				=  false;
static struct cpu_map		*cpus;
static int			realtime_prio			=      0;
static bool			group				=  false;
static unsigned int		page_size;
static unsigned int		mmap_pages			=    128;
static int			freq				=   1000; /* 1 KHz */

static int			delay_secs			=      2;
static bool			zero                            =  false;
static bool			dump_symtab                     =  false;

static bool			hide_kernel_symbols		=  false;
static bool			hide_user_symbols		=  false;
static struct winsize		winsize;

/*
 * Source
 */

struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static const char		*sym_filter			=   NULL;
struct sym_entry		*sym_filter_entry		=   NULL;
struct sym_entry		*sym_filter_entry_sched		=   NULL;
static int			sym_pcnt_filter			=      5;
static int			sym_counter			=      0;
static struct perf_evsel	*sym_evsel			=   NULL;
static int			display_weighted		=     -1;
static const char		*cpu_list;

/*
 * Symbols
 */

struct sym_entry_source {
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		lock;
};

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		snap_count;
	double			weight;
	int			skip;
	u16			name_len;
	u8			origin;
	struct map		*map;
	struct sym_entry_source	*src;
	unsigned long		count[0];
};

/*
 * Source functions
 */

static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
       return ((void *)self) + symbol_conf.priv_size;
}

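/*
 * Determine the terminal size: prefer the LINES/COLUMNS environment
 * variables, fall back to the TIOCGWINSZ ioctl on stdout and finally
 * assume a classic 80x25 terminal.
 */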
void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

static void update_print_entries(struct winsize *ws)
{
	print_entries = ws->ws_row;

	if (print_entries > 9)
		print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

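/*
 * Annotate 'syme': run objdump -dS over the symbol's address range and
 * cache one source_line per line of output so that per-line hit counts
 * can be shown.  Returns -1 when the symbol cannot be annotated, e.g.
 * when it only comes from /proc/kallsyms.
 */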
static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->origin == DSO__ORIG_KERNEL)
		return -1;

	if (syme->src == NULL) {
		syme->src = zalloc(sizeof(*source));
		if (syme->src == NULL)
			return -1;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}
	path = map->dso->long_name;

	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return -1;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c, *sep;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
	return 0;
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->src->lines;
	while (line) {
		for (i = 0; i < evsel_list->nr_entries; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
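	/*
	 * Attribute one sample at 'ip' to the matching annotated line of the
	 * symbol currently being annotated.  A trylock is used so the
	 * sampling path never blocks behind the display thread.
	 */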
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src == NULL || syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

#define PATTERN_LEN		(BITS_PER_LONG / 4 + 2)

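/*
 * Find the objdump output line that starts the symbol's disassembly
 * (the "address <symbol>:" header) and remember it as the point where
 * annotation output begins.
 */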
static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATTERN_LEN + 1];

	sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}

static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT     3

static void show_details(struct sym_entry *syme)
{
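	/*
	 * Print the annotated source of the symbol selected for annotation,
	 * keeping only chunks that are above the sym_pcnt_filter threshold
	 * and decaying (or zeroing) the per-line counts as we go.
	 */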
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}

/*
 * Symbols are added here in event__process_sample and are removed again
 * once their counts have decayed to zero.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < evsel_list->nr_entries - 1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long			samples;
static long			kernel_samples, us_samples;
static long			exact_samples;
static long			guest_us_samples, guest_kernel_samples;
static const char		CONSOLE_CLEAR[] = "";

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

static void print_sym_table(void)
{
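	/*
	 * One display refresh: decay or zero the per-symbol counts, sort the
	 * active symbols into a temporary rbtree by weight, print the header
	 * with the per-event sample rates and then the table itself.
	 */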
	int printed = 0, j;
	struct perf_evsel *counter;
	int snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = kernel_samples/delay_secs;
	float us_samples_per_sec = (us_samples)/delay_secs;
	float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
	float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
	float esamples_percent = (100.0*exact_samples)/samples;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, dso_short_width = 0;
	const int win_width = winsize.ws_col - 1;

	samples = us_samples = kernel_samples = exact_samples = 0;
	guest_kernel_samples = guest_us_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < evsel_list->nr_entries; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	if (!perf_guest) {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
			"  exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
					 samples_per_sec)),
			esamples_percent);
	} else {
		printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
			" guest kernel:%4.1f%% guest us:%4.1f%%"
			" exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
						guest_kernel_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
					   guest_us_samples_per_sec) /
					  samples_per_sec)),
			esamples_percent);
	}

	if (evsel_list->nr_entries == 1 || !display_weighted) {
		struct perf_evsel *first;
		first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
		printf("%" PRIu64, (uint64_t)first->attr.sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_evsel));
	else list_for_each_entry(counter, &evsel_list->entries, node) {
		if (counter->idx)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else if (target_tid != -1)
		printf(" (target_tid: %d", target_tid);
	else
		printf(" (all");

	if (cpu_list)
		printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list);
	else {
		if (target_tid != -1)
			printf(")\n");
		else
			printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : "");
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > dso_short_width)
			dso_short_width = syme->map->dso->short_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (evsel_list->nr_entries == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       evsel_list->nr_entries == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (evsel_list->nr_entries == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016" PRIx64, sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->src->lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->src->lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

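/*
 * Print the interactive key bindings together with their current values.
 */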
static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (evsel_list->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (evsel_list->nr_entries > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel symbols.             \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
		case 'w':
			return evsel_list->nr_entries > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

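/*
 * Act on one interactive key.  An unmapped key first brings up the help
 * screen and reads a second key in raw (non-canonical) terminal mode.
 */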
static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			if (print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (evsel_list->nr_entries > 1) {
				fprintf(stderr, "\nAvailable events:");

				list_for_each_entry(sym_evsel, &evsel_list->entries, node)
					fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= evsel_list->nr_entries) {
					sym_evsel = list_entry(evsel_list->entries.next, struct perf_evsel, node);
					sym_counter = 0;
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel));
					sleep(1);
					break;
				}
				list_for_each_entry(sym_evsel, &evsel_list->entries, node)
					if (sym_evsel->idx == sym_counter)
						break;
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				perf_session__fprintf_dsos(session, stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = !zero;
			break;
		default:
			break;
	}
}

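/*
 * Display loop, run in a separate thread: repaint the symbol table every
 * delay_secs seconds until a key is pressed on stdin, then process that
 * key and start over.
 */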
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"native_safe_halt",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

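/*
 * Called for every symbol that gets loaded: reject linker/section
 * markers, initialize the private sym_entry, schedule the symbol the
 * user asked to annotate and tag well known idle routines so that their
 * samples are skipped.
 */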
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}

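/*
 * Account one PERF_RECORD_SAMPLE: bump the per-mode sample counters,
 * resolve the ip to a symbol and credit the sample to that symbol's
 * per-event count, adding it to the active list if it is not there yet.
 */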
static void event__process_sample(const event_t *self,
				  struct sample_data *sample,
				  struct perf_session *session)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++us_samples;
		if (hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++kernel_samples;
		if (hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++guest_kernel_samples;
		machine = perf_session__find_machine(session, self->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			self->ip.pid);
		return;
	}

	if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
		exact_samples++;

	if (event__preprocess_sample(self, session, &al, sample,
				     symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see whether we need to install the initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		struct perf_evsel *evsel;

		syme->origin = origin;
		evsel = perf_evlist__id2evsel(evsel_list, sample->id);
		assert(evsel != NULL);
		syme->count[evsel->idx]++;
		record_precise_ip(syme, evsel->idx, ip);
		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
	}
}

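/*
 * Drain the events currently queued in this CPU's mmap ring buffer,
 * handing samples to event__process_sample and everything else to
 * event__process.
 */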
static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
{
	struct sample_data sample;
	event_t *event;

	while ((event = perf_evlist__read_on_cpu(evsel_list, cpu)) != NULL) {
		event__parse_sample(event, self, &sample);

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, &sample, self);
		else
			event__process(event, &sample, self);
	}
}

static void perf_session__mmap_read(struct perf_session *self)
{
	int i;

	for (i = 0; i < cpus->nr; i++)
		perf_session__mmap_read_cpu(self, i);
}

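/*
 * Configure and open one counter per event: sample IP/TID (plus period
 * and sample ID where needed), fall back from hardware cycles to the
 * cpu-clock software event when no PMU is available and finally mmap
 * the ring buffers.
 */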
static void start_counters(struct perf_evlist *evlist)
{
	struct perf_evsel *counter;

	list_for_each_entry(counter, &evlist->entries, node) {
		struct perf_event_attr *attr = &counter->attr;

		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

		if (freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq	  = 1;
			attr->sample_freq = freq;
		}

		if (evlist->nr_entries > 1) {
			attr->sample_type |= PERF_SAMPLE_ID;
			attr->read_format |= PERF_FORMAT_ID;
		}

		attr->mmap = 1;
try_again:
		if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
					"\t Consider tweaking"
					" /proc/sys/kernel/perf_event_paranoid.\n");
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE &&
			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("sys_perf_event_open() syscall returned with %d "
			      "(%s).  /bin/dmesg may provide additional information.\n",
			      err, strerror(err));
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}
	}

	if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, true) < 0)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}

static int __cmd_top(void)
{
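	/*
	 * Main run loop: synthesize the existing threads, start the counters,
	 * spawn the display thread, optionally switch to SCHED_FIFO and then
	 * keep draining the mmap buffers, polling whenever no new samples
	 * have arrived.
	 */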
	pthread_t thread;
	struct perf_evsel *first;
	int ret;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (session == NULL)
		return -ENOMEM;

	if (target_tid != -1)
		event__synthesize_thread(target_tid, event__process, session);
	else
		event__synthesize_threads(event__process, session);

	start_counters(evsel_list);
	first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
	perf_session__set_sample_type(session, first->attr.sample_type);

	/* Wait for a minimal set of events before starting the snapshot */
	poll(evsel_list->pollfd, evsel_list->nr_fds, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, display_thread, session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		perf_session__mmap_read(session);

		if (hits == samples)
			ret = poll(evsel_list->pollfd, evsel_list->nr_fds, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

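/*
 * Entry point for 'perf top': parse the options, build the thread/cpu
 * maps and the event list, size the per-symbol private area and hand
 * control over to __cmd_top().
 */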
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (target_pid != -1)
		target_tid = target_pid;

	threads = thread_map__new(target_pid, target_tid);
	if (threads == NULL) {
		pr_err("Problems finding threads to monitor\n");
		usage_with_options(top_usage, options);
	}

	/* CPU and PID are mutually exclusive */
	if (target_tid > 0 && cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		cpu_list = NULL;
	}

	if (!evsel_list->nr_entries &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}

	if (delay_secs < 1)
		delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	if (target_tid != -1)
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(cpu_list);

	if (cpus == NULL)
		usage_with_options(top_usage, options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
			goto out_free_fd;
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (pos->attr.sample_period)
			continue;

		pos->attr.sample_period = default_interval;
	}

	if (perf_evlist__alloc_pollfd(evsel_list, cpus->nr, threads->nr) < 0 ||
	    perf_evlist__alloc_mmap(evsel_list, cpus->nr) < 0)
		goto out_free_fd;

	sym_evsel = list_entry(evsel_list->entries.next, struct perf_evsel, node);

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (evsel_list->nr_entries + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	status = __cmd_top();
out_free_fd:
	perf_evlist__delete(evsel_list);

	return status;
}