/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ~/hackbench 10
   Time: 0.104

    Performance counter stats for '/home/mingo/hackbench':

       1255.538611  task clock ticks     #      10.143 CPU utilization factor
             54011  context switches     #       0.043 M/sec
               385  CPU migrations       #       0.000 M/sec
             17755  pagefaults           #       0.014 M/sec
        3808323185  CPU cycles           #    3033.219 M/sec
        1575111190  instructions         #    1254.530 M/sec
          17367895  cache references     #      13.833 M/sec
           7674421  cache misses         #       6.112 M/sec

    Wall-clock time elapsed:   123.786620 msecs

 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"

#include <sys/prctl.h>
#include <math.h>
#include <locale.h>

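/*
 * Default event set, used when no -e/--event option is given
 * (cmd_stat() copies these into attrs[]):
 */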
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES		},

};

static bool			system_wide			=  false;
static int			nr_cpus				=  0;
static int			run_idx				=  0;

static int			run_count			=  1;
static bool			no_inherit			= false;
static bool			scale				=  true;
static bool			no_aggr				= false;
static pid_t			target_pid			= -1;
static pid_t			target_tid			= -1;
static pid_t			*all_tids			=  NULL;
static int			thread_num			=  0;
static pid_t			child_pid			= -1;
static bool			null_run			=  false;
static bool			big_num				=  false;
static const char		*cpu_list;


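/*
 * Counter file descriptors. Layout, as inferred from the code below:
 * system-wide mode uses fd[cpu][counter][0], per-task mode uses
 * fd[0][counter][thread]; the thread dimension is malloc()ed in cmd_stat():
 */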
static int			*fd[MAX_NR_CPUS][MAX_COUNTERS];

static int			event_scaled[MAX_COUNTERS];

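/*
 * Per-CPU readings - scaled value, time-enabled and time-running -
 * kept separately for the no-aggregation (-A) output path:
 */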
static struct {
	u64 val;
	u64 ena;
	u64 run;
} cpu_counts[MAX_NR_CPUS][MAX_COUNTERS];

static volatile int done = 0;

struct stats
{
	double n, mean, M2;
};

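/*
 * Welford's online algorithm: update the running mean and the sum of
 * squared deviations (M2) in a single pass, without storing samples:
 */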
static void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
static double stddev_stats(struct stats *stats)
{
	double variance = stats->M2 / (stats->n - 1);
	double variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

struct stats			event_res_stats[MAX_COUNTERS][3];
struct stats			runtime_nsecs_stats[MAX_NR_CPUS];
struct stats			runtime_cycles_stats[MAX_NR_CPUS];
struct stats			runtime_branches_stats[MAX_NR_CPUS];
struct stats			walltime_nsecs_stats;

#define MATCH_EVENT(t, c, counter)			\
	(attrs[counter].type == PERF_TYPE_##t &&	\
	 attrs[counter].config == PERF_COUNT_##c)

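/*
 * Example: MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, i) is true when counter i
 * is the software task-clock event.
 */
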
#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"

static int create_perf_stat_counter(int counter)
{
	struct perf_event_attr *attr = attrs + counter;
	int thread;
	int ncreated = 0;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (system_wide) {
		int cpu;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			fd[cpu][counter][0] = sys_perf_event_open(attr,
					-1, cpumap[cpu], -1, 0);
			if (fd[cpu][counter][0] < 0)
				pr_debug(ERR_PERF_OPEN, counter,
					 fd[cpu][counter][0], strerror(errno));
			else
				++ncreated;
		}
	} else {
		attr->inherit = !no_inherit;
		if (target_pid == -1 && target_tid == -1) {
			attr->disabled = 1;
			attr->enable_on_exec = 1;
		}
		for (thread = 0; thread < thread_num; thread++) {
			fd[0][counter][thread] = sys_perf_event_open(attr,
				all_tids[thread], -1, -1, 0);
			if (fd[0][counter][thread] < 0)
				pr_debug(ERR_PERF_OPEN, counter,
					 fd[0][counter][thread],
					 strerror(errno));
			else
				++ncreated;
		}
	}

	return ncreated;
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
	if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
	    MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		return 1;

	return 0;
}

/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static void read_counter_aggr(int counter)
{
	u64 count[3], single_count[3];
	int cpu;
	size_t res, nv;
	int scaled;
	int i, thread;

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < thread_num; thread++) {
			if (fd[cpu][counter][thread] < 0)
				continue;

			res = read(fd[cpu][counter][thread],
					single_count, nv * sizeof(u64));
			assert(res == nv * sizeof(u64));

			close(fd[cpu][counter][thread]);
			fd[cpu][counter][thread] = -1;

			count[0] += single_count[0];
			if (scale) {
				count[1] += single_count[1];
				count[2] += single_count[2];
			}
		}
	}

	scaled = 0;
	if (scale) {
		if (count[2] == 0) {
			event_scaled[counter] = -1;
			count[0] = 0;
			return;
		}

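		/*
		 * The counter ran for only part of the time it was enabled
		 * (it was multiplexed with other events), so extrapolate the
		 * raw count linearly: e.g. a counter that ran for half of the
		 * enabled time has its count doubled.
		 */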
		if (count[2] < count[1]) {
			event_scaled[counter] = 1;
			count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
		}
	}

	for (i = 0; i < 3; i++)
		update_stats(&event_res_stats[counter][i], count[i]);

	if (verbose) {
		fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
				count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		update_stats(&runtime_nsecs_stats[0], count[0]);
	if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
		update_stats(&runtime_cycles_stats[0], count[0]);
	if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
		update_stats(&runtime_branches_stats[0], count[0]);
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static void read_counter(int counter)
{
	u64 count[3];
	int cpu;
	size_t res, nv;

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;

	for (cpu = 0; cpu < nr_cpus; cpu++) {

		if (fd[cpu][counter][0] < 0)
			continue;

		res = read(fd[cpu][counter][0], count, nv * sizeof(u64));

		assert(res == nv * sizeof(u64));

		close(fd[cpu][counter][0]);
		fd[cpu][counter][0] = -1;

		if (scale) {
			if (count[2] == 0) {
				count[0] = 0;
			} else if (count[2] < count[1]) {
				count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
			}
		}
		cpu_counts[cpu][counter].val = count[0]; /* scaled count */
		cpu_counts[cpu][counter].ena = count[1];
		cpu_counts[cpu][counter].run = count[2];

		if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
			update_stats(&runtime_nsecs_stats[cpu], count[0]);
		if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
			update_stats(&runtime_cycles_stats[cpu], count[0]);
		if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
			update_stats(&runtime_branches_stats[cpu], count[0]);
	}
}

static int run_perf_stat(int argc __used, const char **argv)
{
	unsigned long long t0, t1;
	int status = 0;
	int counter, ncreated = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = (argc > 0);
	char buf;

	if (!system_wide)
		nr_cpus = 1;

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(1);
	}

	if (forks) {
		if ((child_pid = fork()) < 0)
			perror("failed to fork");

		if (!child_pid) {
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			exit(-1);
		}

		if (target_tid == -1 && target_pid == -1 && !system_wide)
			all_tids[0] = child_pid;

		/*
		 * Wait for the child to be ready to exec.
		 */
		close(child_ready_pipe[1]);
		close(go_pipe[0]);
		if (read(child_ready_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");
		close(child_ready_pipe[0]);
	}

	for (counter = 0; counter < nr_counters; counter++)
		ncreated += create_perf_stat_counter(counter);

	if (ncreated == 0) {
		pr_err("No permission to collect %sstats.\n"
		       "Consider tweaking /proc/sys/kernel/perf_event_paranoid.\n",
		       system_wide ? "system-wide " : "");
		if (child_pid != -1)
			kill(child_pid, SIGTERM);
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();

	if (forks) {
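		/*
		 * Closing the write end of go_pipe makes the child's blocking
		 * read() return, which lets it exec the workload:
		 */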
		close(go_pipe[1]);
		wait(&status);
	} else {
		while (!done)
			sleep(1);
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	if (no_aggr) {
		for (counter = 0; counter < nr_counters; counter++)
			read_counter(counter);
	} else {
		for (counter = 0; counter < nr_counters; counter++)
			read_counter_aggr(counter);
	}
	return WEXITSTATUS(status);
}

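/*
 * Print the relative run-to-run deviation of a counter; only meaningful
 * when the workload is repeated with -r/--repeat:
 */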
static void print_noise(int counter, double avg)
{
	if (run_count == 1)
		return;

	fprintf(stderr, "   ( +- %7.3f%% )",
			100 * stddev_stats(&event_res_stats[counter][0]) / avg);
}

static void nsec_printout(int cpu, int counter, double avg)
{
	double msecs = avg / 1e6;

	if (no_aggr)
		fprintf(stderr, "CPU%-4d %18.6f  %-24s",
			cpumap[cpu], msecs, event_name(counter));
	else
		fprintf(stderr, " %18.6f  %-24s", msecs, event_name(counter));

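	/*
	 * task-clock time divided by wall-clock time gives the average
	 * number of CPUs the workload kept busy:
	 */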
	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
		fprintf(stderr, " # %10.3f CPUs ",
				avg / avg_stats(&walltime_nsecs_stats));
	}
}

static void abs_printout(int cpu, int counter, double avg)
{
	double total, ratio = 0.0;
	char cpustr[16] = { '\0', };

	if (no_aggr)
		sprintf(cpustr, "CPU%-4d", cpumap[cpu]);
	else
		cpu = 0;

	if (big_num)
		fprintf(stderr, "%s %'18.0f  %-24s",
			cpustr, avg, event_name(counter));
	else
		fprintf(stderr, "%s %18.0f  %-24s",
			cpustr, avg, event_name(counter));

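	/*
	 * Derived ratios: instructions per cycle (IPC), branch-miss
	 * percentage, or generic events per task-clock second, in
	 * millions (M/sec):
	 */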
	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
		total = avg_stats(&runtime_cycles_stats[cpu]);

		if (total)
			ratio = avg / total;

		fprintf(stderr, " # %10.3f IPC  ", ratio);
	} else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
			runtime_branches_stats[cpu].n != 0) {
		total = avg_stats(&runtime_branches_stats[cpu]);

		if (total)
			ratio = avg * 100 / total;

		fprintf(stderr, " # %10.3f %%    ", ratio);

	} else if (runtime_nsecs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;

		fprintf(stderr, " # %10.3f M/sec", ratio);
	}
}

/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(int counter)
{
	double avg = avg_stats(&event_res_stats[counter][0]);
	int scaled = event_scaled[counter];

	if (scaled == -1) {
		fprintf(stderr, " %18s  %-24s\n",
			"<not counted>", event_name(counter));
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(-1, counter, avg);
	else
		abs_printout(-1, counter, avg);

	print_noise(counter, avg);

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&event_res_stats[counter][1]);
		avg_running = avg_stats(&event_res_stats[counter][2]);

		fprintf(stderr, "  (scaled from %.2f%%)",
				100 * avg_running / avg_enabled);
	}

	fprintf(stderr, "\n");
}

/*
 * Print out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static void print_counter(int counter)
{
	u64 ena, run, val;
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		val = cpu_counts[cpu][counter].val;
		ena = cpu_counts[cpu][counter].ena;
		run = cpu_counts[cpu][counter].run;
		if (run == 0 || ena == 0) {
			fprintf(stderr, "CPU%-4d %18s  %-24s", cpumap[cpu],
					"<not counted>", event_name(counter));

			fprintf(stderr, "\n");
			continue;
		}

		if (nsec_counter(counter))
			nsec_printout(cpu, counter, val);
		else
			abs_printout(cpu, counter, val);

		print_noise(counter, 1.0);

		if (run != ena) {
			fprintf(stderr, "  (scaled from %.2f%%)",
					100.0 * run / ena);
		}
		fprintf(stderr, "\n");
	}
}

static void print_stat(int argc, const char **argv)
{
	int i, counter;

	fflush(stdout);

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for ");
	if (target_pid == -1 && target_tid == -1) {
		fprintf(stderr, "\'%s", argv[0]);
		for (i = 1; i < argc; i++)
			fprintf(stderr, " %s", argv[i]);
	} else if (target_pid != -1)
		fprintf(stderr, "process id \'%d", target_pid);
	else
		fprintf(stderr, "thread id \'%d", target_tid);

	fprintf(stderr, "\'");
	if (run_count > 1)
		fprintf(stderr, " (%d runs)", run_count);
	fprintf(stderr, ":\n\n");

	if (no_aggr) {
		for (counter = 0; counter < nr_counters; counter++)
			print_counter(counter);
	} else {
		for (counter = 0; counter < nr_counters; counter++)
			print_counter_aggr(counter);
	}

	fprintf(stderr, "\n");
	fprintf(stderr, " %18.9f  seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
	if (run_count > 1) {
		fprintf(stderr, "   ( +- %7.3f%% )",
				100*stddev_stats(&walltime_nsecs_stats) /
				avg_stats(&walltime_nsecs_stats));
	}
	fprintf(stderr, "\n\n");
}

static volatile int signr = -1;

static void skip_signal(int signo)
{
	if (child_pid == -1)
		done = 1;

	signr = signo;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

static const char * const stat_usage[] = {
	"perf stat [<options>] [<command>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - dont start any counters"),
	OPT_BOOLEAN('B', "big-num", &big_num,
		    "print large numbers with thousands\' separators"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		    "list of cpus to monitor in system-wide"),
	OPT_BOOLEAN('A', "no-aggr", &no_aggr,
		    "disable CPU count aggregation"),
	OPT_END()
};

int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
	int status;
	int i, j;

	setlocale(LC_ALL, "");

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target_pid == -1 && target_tid == -1)
		usage_with_options(stat_usage, options);
	if (run_count <= 0)
		usage_with_options(stat_usage, options);

	/* no_aggr is for system-wide only */
	if (no_aggr && !system_wide)
		usage_with_options(stat_usage, options);

	/* Set attrs and nr_counters if no event is selected and !null_run */
	if (!null_run && !nr_counters) {
		memcpy(attrs, default_attrs, sizeof(default_attrs));
		nr_counters = ARRAY_SIZE(default_attrs);
	}

	if (system_wide)
		nr_cpus = read_cpu_map(cpu_list);
	else
		nr_cpus = 1;

	if (nr_cpus < 1)
		usage_with_options(stat_usage, options);

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
					target_pid);
			usage_with_options(stat_usage, options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			return -ENOMEM;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = malloc(sizeof(int)*thread_num);
			if (!fd[i][j])
				return -ENOMEM;
		}
	}

	/*
	 * We don't want to block the signals, since that would cause
	 * child tasks to inherit the blocked set and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but to be ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
		status = run_perf_stat(argc, argv);
	}

	if (status != -1)
		print_stat(argc, argv);

	return status;
}