/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/symbol.h"
#include "util/color.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static int			fd[MAX_NR_CPUS][MAX_COUNTERS];

static int			system_wide			=      0;

static int			default_interval		=      0;

static int			count_filter			=      5;
static int			print_entries			=     15;

static int			target_pid			=     -1;
static int			inherit				=      0;
static int			profile_cpu			=     -1;
static int			nr_cpus				=      0;
static unsigned int		realtime_prio			=      0;
static int			group				=      0;
static unsigned int		page_size;
static unsigned int		mmap_pages			=     16;
static int			freq				=   1000; /* 1 KHz */

static int			delay_secs			=      2;
static int			zero                            =      0;
static int			dump_symtab                     =      0;

/*
 * Source
 */

struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static char			*sym_filter			=   NULL;
struct sym_entry		*sym_filter_entry		=   NULL;
static int			sym_pcnt_filter			=      5;
static int			sym_counter			=      0;
static int			display_weighted		=     -1;

/*
 * Symbols
 */

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		count[MAX_COUNTERS];
	unsigned long		snap_count;
	double			weight;
	int			skip;
	struct map		*map;
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		source_lock;
};
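/*
 * Note on layout (as used throughout this file): each sym_entry lives in the
 * per-symbol private area requested from the symbol loader (parse_symbols()
 * passes sizeof(struct sym_entry)), and the code assumes the struct symbol
 * itself follows it immediately.  That is why (struct symbol *)(syme + 1)
 * recovers the symbol and dso__sym_priv() goes the other way.
 */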

/*
 * Source functions
 */

static void parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return;

	if (syme->lines) {
		pthread_mutex_lock(&syme->source_lock);
		goto out_assign;
	}

	sym = (struct symbol *)(syme + 1);
	map = syme->map;
	path = map->dso->long_name;

	len = sym->end - sym->start;

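	/*
	 * Annotate by running objdump -dS over the symbol's DSO.  The
	 * map-relative sym->start/end are translated back to the object's own
	 * addresses with unmap_ip(), so the addresses objdump prints can be
	 * matched against sampled IPs later on.
	 */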
	sprintf(command,
		"objdump --start-address=0x%016Lx "
			 "--stop-address=0x%016Lx -dS %s",
		map->unmap_ip(map, sym->start),
		map->unmap_ip(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return;

	pthread_mutex_lock(&syme->source_lock);
	syme->lines_tail = &syme->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*syme->lines_tail = src;
		syme->lines_tail = &src->next;

		if (strlen(src->line)>8 && src->line[8] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
		if (strlen(src->line)>8 && src->line[16] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&syme->source_lock);
}

static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->lines;
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->source_lock))
		return;

	if (!syme->source)
		goto out_unlock;

	for (line = syme->lines; line; line = line->next) {
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->source_lock);
}

static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = (struct symbol *)(syme + 1);
	struct source_line *line;
	char pattern[PATH_MAX];

	sprintf(pattern, "<%s>:", symbol->name);

	pthread_mutex_lock(&syme->source_lock);
	for (line = syme->lines; line; line = line->next) {
		if (strstr(line->line, pattern)) {
			syme->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->source_lock);
}

static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT     3

static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->source)
		lookup_sym_source(syme);

	if (!syme->source)
		return;

	symbol = (struct symbol *)(syme + 1);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->source_lock);
	line = syme->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->source_lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}

/*
 * Symbols will be added here in event__process_sample and will be removed
 * once they have decayed.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
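/*
 * For example, with two counters and weighted display the loop below never
 * runs and the weight reduces to snap_count / (count[1] + 1): symbols hot in
 * the first event but cold in the last one sort higher.
 */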
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long			samples;
static long			userspace_samples;
static const char		CONSOLE_CLEAR[] = "\033[H\033[2J";

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;

	samples = userspace_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < nr_counters; j++)
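				/* age the counts: zero them, or keep 7/8 per refresh */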
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf(
"------------------------------------------------------------------------------\n");
	printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("------------------------------------------------------------------------------\n\n");

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	if (nr_counters == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" function                                 DSO\n");
	printf("   %s    _______ _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" ________________________________ ________________\n\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = (struct symbol *)(syme + 1);

		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-32s", sym->name);
		printf(" %s", syme->map->dso->short_name);
		printf("\n");
	}
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->source_lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->source_lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = (struct symbol *)(syme + 1);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		goto out_free;
	} else
		parse_source(found);

out_free:
	free(buf);
}

static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = (struct symbol *)(sym_filter_entry+1);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	if (vmlinux_name) {
		fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
		fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
		fprintf(stdout, "\t[S]     stop annotation.\n");
	}

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
			return 1;
		case 'E':
		case 'w':
			return nr_counters > 1 ? 1 : 0;
		case 'F':
		case 's':
		case 'S':
			return vmlinux_name ? 1 : 0;
		default:
			break;
	}

	return 0;
}

static void handle_keypress(int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			break;
		case 'E':
			if (nr_counters > 1) {
				int i;

				fprintf(stderr, "\nAvailable events:");
				for (i = 0; i < nr_counters; i++)
					fprintf(stderr, "\n\t%d %s", i, event_name(i));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
					sym_counter = 0;
					sleep(1);
				}
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->source_lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->source_lock);
			}
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = ~zero;
			break;
		default:
			break;
	}
}

static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;

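	/*
	 * Put the terminal in non-canonical, no-echo mode with non-blocking
	 * reads (VMIN = VTIME = 0) so single keypresses are seen without
	 * waiting for Enter; the saved settings are restored after each key.
	 */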
	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = dso__sym_priv(map->dso, sym);
	syme->map = map;
	pthread_mutex_init(&syme->source_lock, NULL);
	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
		sym_filter_entry = syme;

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	return 0;
}

static int parse_symbols(void)
{
	if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry),
			      symbol_filter, 1) <= 0)
		return -1;

	if (dump_symtab)
		dsos__fprintf(stderr);

	return 0;
}

static void event__process_sample(const event_t *self, int counter)
{
	u64 ip = self->ip.ip;
	struct map *map;
	struct sym_entry *syme;
	struct symbol *sym;

	switch (self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) {
	case PERF_RECORD_MISC_USER: {
		struct thread *thread = threads__findnew(self->ip.pid);

		if (thread == NULL)
			return;

		map = thread__find_map(thread, ip);
		if (map != NULL) {
			ip = map->map_ip(map, ip);
			sym = map__find_symbol(map, ip, symbol_filter);
			if (sym == NULL)
				return;
			userspace_samples++;
			break;
		}
	}
		/*
		 * If this is outside of all known maps,
		 * and is a negative address, try to look it
		 * up in the kernel dso, as it might be a
		 * vsyscall or vdso (which executes in user-mode).
		 */
		if ((long long)ip >= 0)
			return;
		/* Fall thru */
	case PERF_RECORD_MISC_KERNEL:
		sym = kernel_maps__find_symbol(ip, &map);
		if (sym == NULL)
			return;
		break;
	default:
		return;
	}

	syme = dso__sym_priv(map->dso, sym);

	if (!syme->skip) {
		syme->count[counter]++;
		record_precise_ip(syme, counter, ip);
		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
		++samples;
		return;
	}
}

static void event__process_mmap(event_t *self)
{
	struct thread *thread = threads__findnew(self->mmap.pid);

	if (thread != NULL) {
		struct map *map = map__new(&self->mmap, NULL, 0,
					   sizeof(struct sym_entry));
		if (map != NULL)
			thread__insert_map(thread, map);
	}
}

static void event__process_comm(event_t *self)
{
	struct thread *thread = threads__findnew(self->comm.pid);

	if (thread != NULL)
		thread__set_comm(thread, self->comm.comm);
}

static int event__process(event_t *event)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event);
		break;
	default:
		break;
	}

	return 0;
}

struct mmap_data {
	int			counter;
	void			*base;
	int			mask;
	unsigned int		prev;
};
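/*
 * Each per-cpu/per-counter mmap area starts with one struct
 * perf_event_mmap_page control page, where the kernel publishes data_head,
 * followed by the sample data itself; 'prev' tracks how far userspace has
 * read.  data_head must be read before the data, hence the rmb() below.
 */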

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}

static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, md->counter);
		else
			event__process(event);
		old += size;
	}

	md->prev = old;
}

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void mmap_read(void)
{
	int i, counter;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			mmap_read_counter(&mmap_array[i][counter]);
	}
}

int nr_poll;
int group_fd;

static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;

	attr = attrs + counter;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

	attr->inherit		= (cpu < 0) && inherit;
	attr->mmap		= 1;

try_again:
	fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		printf("\n");
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		exit(-1);
	}
	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

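	/*
	 * Map one extra page in front of the data for the control page.
	 * The '& mask' indexing in mmap_read_counter() relies on
	 * mmap_pages * page_size being a power of two.
	 */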
	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}

static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;

	if (target_pid != -1)
		event__synthesize_thread(target_pid, event__process);
	else
		event__synthesize_threads(event__process);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	mmap_read();

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
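		/*
		 * Drain all mmap buffers; only block in poll() when the last
		 * pass produced no new samples.
		 */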
		int hits = samples;

		mmap_read();

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate - requires -k option"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter;

	symbol__init();

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	if (delay_secs < 1)
		delay_secs = 1;

	parse_symbols();
	parse_source(sym_filter_entry);


	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	return __cmd_top();
}