/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"

#include <sys/prctl.h>
#include <locale.h>

#define DEFAULT_SEPARATOR	" "
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"

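/*
 * Default events, used when no event is specified with -e/--event:
 */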
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},

};

/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
static struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};

/*
 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
 */
static struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

};

/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
static struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};



static struct perf_evlist	*evsel_list;

static struct perf_target	target = {
	.uid	= UINT_MAX,
};

static int			run_idx				=  0;
static int			run_count			=  1;
static bool			no_inherit			= false;
static bool			scale				=  true;
static bool			no_aggr				= false;
static pid_t			child_pid			= -1;
static bool			null_run			=  false;
static int			detailed_run			=  0;
static bool			sync_run			=  false;
static bool			big_num				=  true;
static int			big_num_opt			=  -1;
static const char		*csv_sep			= NULL;
static bool			csv_output			= false;
static bool			group				= false;
static const char		*output_name			= NULL;
static FILE			*output				= NULL;
static int			output_fd;

static volatile int done = 0;

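/*
 * Per-event private data: statistics for the raw count and the
 * enabled/running times, accumulated across repeated runs (-r):
 */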
struct perf_stat {
	struct stats	  res_stats[3];
};

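/* Allocate/free the per-event 'struct perf_stat' hanging off evsel->priv: */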
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat));
	return evsel->priv == NULL ? -ENOMEM : 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	free(evsel->priv);
	evsel->priv = NULL;
}

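/* Prefer the event's own cpu map unless a CPU list was requested with -C: */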
static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
	return perf_evsel__cpus(evsel)->nr;
}

static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
static struct stats runtime_branches_stats[MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;

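/*
 * Open one counter for the monitored threads or CPUs.  Kernels that do not
 * know the exclude_guest/exclude_host attribute bits get a second attempt
 * with those bits cleared:
 */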
static int create_perf_stat_counter(struct perf_evsel *evsel,
				    struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_missing = false;
	int ret;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

retry:
	if (exclude_guest_missing)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;

	if (perf_target__has_cpu(&target)) {
		ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
		if (ret)
			goto check_ret;
		return 0;
	}

	if (!perf_target__has_task(&target) && (!group || evsel == first)) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}

	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads);
	if (!ret)
		return 0;
	/* fall through */
check_ret:
	if (ret && errno == EINVAL) {
		if (!exclude_guest_missing &&
		    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
			pr_debug("Old kernel, cannot exclude "
				 "guest or host samples.\n");
			exclude_guest_missing = true;
			goto retry;
		}
	}
	return ret;
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		return 1;

	return 0;
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
{
	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[0], count[0]);
}

/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static int read_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i;

	if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
			       evsel_list->threads->nr, scale) < 0)
		return -1;

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	update_shadow_stats(counter, count);

	return 0;
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	u64 *count;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
			return -1;

		count = counter->counts->cpu[cpu].values;

		update_shadow_stats(counter, count);
	}

	return 0;
}

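/*
 * Fork and exec the workload (if one was given), enable the counters
 * around it and read the results back when it finishes:
 */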
static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
	unsigned long long t0, t1;
	struct perf_evsel *counter, *first;
	int status = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = (argc > 0);
	char buf;

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		return -1;
	}

	if (forks) {
		if ((child_pid = fork()) < 0)
			perror("failed to fork");

		if (!child_pid) {
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			exit(-1);
		}

		if (perf_target__none(&target))
			evsel_list->threads->map[0] = child_pid;

		/*
		 * Wait for the child to be ready to exec.
		 */
		close(child_ready_pipe[1]);
		close(go_pipe[0]);
		if (read(child_ready_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");
		close(child_ready_pipe[0]);
	}

	if (group)
		perf_evlist__set_leader(evsel_list);

	first = perf_evlist__first(evsel_list);

	list_for_each_entry(counter, &evsel_list->entries, node) {
		if (create_perf_stat_counter(counter, first) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;
				continue;
			}

			if (errno == EPERM || errno == EACCES) {
				error("You may not have permission to collect %sstats.\n"
				      "\t Consider tweaking"
				      " /proc/sys/kernel/perf_event_paranoid or running as root.",
				      target.system_wide ? "system-wide " : "");
			} else {
				error("open_counter returned with %d (%s). "
				      "/bin/dmesg may provide additional information.\n",
				       errno, strerror(errno));
			}
			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			pr_err("Not all events could be opened.\n");
			return -1;
		}
		counter->supported = true;
	}

	if (perf_evlist__set_filters(evsel_list)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();

	if (forks) {
		close(go_pipe[1]);
		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		while(!done) sleep(1);
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	if (no_aggr) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter_aggr(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     evsel_list->threads->nr);
		}
	}

	return WEXITSTATUS(status);
}

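/*
 * Print the run-to-run variation (relative standard deviation) of a
 * counter when -r/--repeat was used:
 */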
static void print_noise_pct(double total, double avg)
{
	double pct = rel_stddev_stats(total, avg);

	if (csv_output)
		fprintf(output, "%s%.2f%%", csv_sep, pct);
	else if (pct)
		fprintf(output, "  ( +-%6.2f%% )", pct);
}

static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}

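/*
 * Pretty-print a nanosecond based counter (cpu-clock, task-clock) as
 * milliseconds, plus the derived "CPUs utilized" ratio for task-clock:
 */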
static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
{
	double msecs = avg / 1e6;
	char cpustr[16] = { '\0', };
	const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";

	if (no_aggr)
		sprintf(cpustr, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[cpu], csv_sep);

	fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output)
		return;

	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		fprintf(output, " # %8.3f CPUs utilized          ",
			avg / avg_stats(&walltime_nsecs_stats));
	else
		fprintf(output, "                                   ");
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES] 	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}

static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel
					  __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " frontend cycles idle   ");
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel
					 __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " backend  cycles idle   ");
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel __maybe_unused,
				double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_branches_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all branches        ");
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_dcache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-dcache hits  ");
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_icache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-icache hits  ");
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_dtlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all dTLB cache hits ");
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_itlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all iTLB cache hits ");
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel __maybe_unused,
				  double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_ll_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all LL-cache hits   ");
}

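/*
 * Pretty-print an absolute event count plus a derived ratio such as
 * instructions per cycle, miss percentages or GHz:
 */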
static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
{
	double total, ratio = 0.0;
	char cpustr[16] = { '\0', };
	const char *fmt;

	if (csv_output)
		fmt = "%s%.0f%s%s";
	else if (big_num)
		fmt = "%s%'18.0f%s%-25s";
	else
		fmt = "%s%18.0f%s%-25s";

	if (no_aggr)
		sprintf(cpustr, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[cpu], csv_sep);
	else
		cpu = 0;

	fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output)
		return;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[cpu]);

		if (total)
			ratio = avg / total;

		fprintf(output, " #   %5.2f  insns per cycle        ", ratio);

		total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));

		if (total && avg) {
			ratio = total / avg;
			fprintf(output, "\n                                             #   %5.2f  stalled cycles per insn", ratio);
		}

	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
			runtime_branches_stats[cpu].n != 0) {
		print_branch_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_dcache_stats[cpu].n != 0) {
		print_l1_dcache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_icache_stats[cpu].n != 0) {
		print_l1_icache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_dtlb_cache_stats[cpu].n != 0) {
		print_dtlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_itlb_cache_stats[cpu].n != 0) {
		print_itlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_ll_cache_stats[cpu].n != 0) {
		print_ll_cache_misses(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
			runtime_cacherefs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cacherefs_stats[cpu]);

		if (total)
			ratio = avg * 100 / total;

		fprintf(output, " # %8.3f %% of all cache refs    ", ratio);

	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1.0 * avg / total;

		fprintf(output, " # %8.3f GHz                    ", ratio);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}

		fprintf(output, " # %8.3f %c/sec                  ", ratio, unit);
	} else {
		fprintf(output, "                                   ");
	}
}

/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	double avg = avg_stats(&ps->res_stats[0]);
	int scaled = counter->counts->scaled;

	if (scaled == -1) {
		fprintf(output, "%*s%s%*s",
			csv_output ? 0 : 18,
			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
			csv_sep,
			csv_output ? 0 : -24,
			perf_evsel__name(counter));

		if (counter->cgrp)
			fprintf(output, "%s%s", csv_sep, counter->cgrp->name);

		fputc('\n', output);
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(-1, counter, avg);
	else
		abs_printout(-1, counter, avg);

	print_noise(counter, avg);

	if (csv_output) {
		fputc('\n', output);
		return;
	}

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&ps->res_stats[1]);
		avg_running = avg_stats(&ps->res_stats[2]);

		fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
	}
	fprintf(output, "\n");
}

/*
 * Print out the results of a single counter:
 * does not use aggregated count in system-wide
 */
static void print_counter(struct perf_evsel *counter)
{
	u64 ena, run, val;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		val = counter->counts->cpu[cpu].val;
		ena = counter->counts->cpu[cpu].ena;
		run = counter->counts->cpu[cpu].run;
		if (run == 0 || ena == 0) {
			fprintf(output, "CPU%*d%s%*s%s%*s",
				csv_output ? 0 : -4,
				perf_evsel__cpus(counter)->map[cpu], csv_sep,
				csv_output ? 0 : 18,
				counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
				csv_sep,
				csv_output ? 0 : -24,
				perf_evsel__name(counter));

			if (counter->cgrp)
				fprintf(output, "%s%s",
					csv_sep, counter->cgrp->name);

			fputc('\n', output);
			continue;
		}

		if (nsec_counter(counter))
			nsec_printout(cpu, counter, val);
		else
			abs_printout(cpu, counter, val);

		if (!csv_output) {
			print_noise(counter, 1.0);

			if (run != ena)
				fprintf(output, "  (%.2f%%)",
					100.0 * run / ena);
		}
		fputc('\n', output);
	}
}

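/*
 * Print the summary header, all requested counters and the elapsed
 * wall-clock time:
 */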
static void print_stat(int argc, const char **argv)
{
	struct perf_evsel *counter;
	int i;

	fflush(stdout);

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " Performance counter stats for ");
		if (!perf_target__has_task(&target)) {
			fprintf(output, "\'%s", argv[0]);
			for (i = 1; i < argc; i++)
				fprintf(output, " %s", argv[i]);
		} else if (target.pid)
			fprintf(output, "process id \'%s", target.pid);
		else
			fprintf(output, "thread id \'%s", target.tid);

		fprintf(output, "\'");
		if (run_count > 1)
			fprintf(output, " (%d runs)", run_count);
		fprintf(output, ":\n\n");
	}

	if (no_aggr) {
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter);
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter);
	}

	if (!csv_output) {
		if (!null_run)
			fprintf(output, "\n");
		fprintf(output, " %17.9f seconds time elapsed",
				avg_stats(&walltime_nsecs_stats)/1e9);
		if (run_count > 1) {
			fprintf(output, "                                        ");
			print_noise_pct(stddev_stats(&walltime_nsecs_stats),
					avg_stats(&walltime_nsecs_stats));
		}
		fprintf(output, "\n\n");
	}
}

static volatile int signr = -1;

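/*
 * Ctrl-C handling: remember which signal arrived; only stop counting
 * directly when no workload was forked.  The signal is re-raised from
 * the atexit handler so the default action still takes place:
 */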
static void skip_signal(int signo)
{
	if(child_pid == -1)
		done = 1;

	signr = signo;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

static const char * const stat_usage[] = {
	"perf stat [<options>] [<command>]",
	NULL
};

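/* -B/--no-big-num: remember whether the user asked for thousands' separators explicitly. */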
static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}

static bool append_file;

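/* Command line options understood by 'perf stat': */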
static const struct option options[] = {
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - dont start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		    "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		    "list of cpus to monitor in system-wide"),
	OPT_BOOLEAN('A', "no-aggr", &no_aggr,
		    "disable CPU count aggregation"),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_END()
};

/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	/* Set attrs if no event is selected and !null_run: */
	if (null_run)
		return 0;

	if (!evsel_list->nr_entries) {
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run <  1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}

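/*
 * The 'perf stat' entry point: parse the options, set up the event list
 * and the output stream, run the workload -r times and print the results:
 */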
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;
	const char *mode;

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);

	output = stderr;
	if (output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		usage_with_options(stat_usage, options);
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		usage_with_options(stat_usage, options);
	}

	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			usage_with_options(stat_usage, options);
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	if (!argc && !perf_target__has_task(&target))
		usage_with_options(stat_usage, options);
	if (run_count <= 0)
		usage_with_options(stat_usage, options);

	/* no_aggr, cgroup are for system-wide only */
	if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		usage_with_options(stat_usage, options);
	}

	if (add_default_attributes())
		goto out;

	perf_target__validate(&target);

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (perf_target__has_task(&target))
			pr_err("Problems finding threads of monitor\n");
		if (perf_target__has_cpu(&target))
			perror("failed to parse CPUs map");

		usage_with_options(stat_usage, options);
		return -1;
	}

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_evsel__alloc_stat_priv(pos) < 0 ||
		    perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
			goto out_free_fd;
	}

	/*
	 * We dont want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (sync_run)
			sync();

		status = run_perf_stat(argc, argv);
	}

	if (status != -1)
		print_stat(argc, argv);
out_free_fd:
	list_for_each_entry(pos, &evsel_list->entries, node)
		perf_evsel__free_stat_priv(pos);
	perf_evlist__delete_maps(evsel_list);
out:
	perf_evlist__delete(evsel_list);
	return status;
}