/*
 * perf stat:  /usr/bin/time -alike performance counter statistics utility
 *
 *        It summarizes the counter events of all tasks (and child tasks),
 *        covering all CPUs that the command (or workload) executes on.
 *        It only counts the per-task events of the workload started,
 *        independent of how many other tasks run on those CPUs.
 *
 *   Sample output:
 *
 *   $ perf stat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null
 *
 *   Performance counter stats for 'ls':
 *
 *           163516953 instructions
 *                2295 cache-misses
 *             2855182 branch-misses
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include <sys/prctl.h>

static int			system_wide			=  0;
static int			inherit				=  1;

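/*
 * Default counter set used when no -e option is given:
 * four software and four hardware events.
 */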
static __u64			default_event_id[MAX_COUNTERS]	= {
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),

	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
};

static int			default_interval = 100000;
static int			event_count[MAX_COUNTERS];
static int			fd[MAX_NR_CPUS][MAX_COUNTERS];

static int			target_pid			= -1;
static int			nr_cpus				=  0;
static unsigned int		page_size;

static int			scale				=  1;

static const unsigned int default_count[] = {
	1000000,
	1000000,
	  10000,
	  10000,
	1000000,
	  10000,
};

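/*
 * read() results per counter:
 *
 *   event_res[counter][0]: raw count
 *   event_res[counter][1]: time enabled
 *   event_res[counter][2]: time running
 *
 * event_scaled[counter] is set to 1 when the count had to be
 * re-scaled and to -1 when the counter never ran.
 */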
static __u64			event_res[MAX_COUNTERS][3];
static __u64			event_scaled[MAX_COUNTERS];

static __u64			runtime_nsecs;

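/*
 * Set up one counter: one fd per CPU in system-wide mode, or a single
 * inherited, initially-disabled counter on the current task (inherited
 * by the forked workload) otherwise.
 */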
static void create_perfstat_counter(int counter)
{
	struct perf_counter_hw_event hw_event;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.config		= event_id[counter];
	hw_event.record_type	= 0;
	hw_event.nmi		= 1;
	hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
	hw_event.exclude_user   = event_mask[counter] & EVENT_MASK_USER;

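	/*
	 * With TOTAL_TIME_ENABLED and TOTAL_TIME_RUNNING in read_format,
	 * a read() of the counter returns three u64 values (count, time
	 * enabled, time running) instead of one - read_counter() uses
	 * them to re-scale counters that were multiplexed on the PMU.
	 */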
	if (scale)
		hw_event.read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
					  PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (system_wide) {
		int cpu;
		for (cpu = 0; cpu < nr_cpus; cpu ++) {
			fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0);
			if (fd[cpu][counter] < 0) {
				printf("perfstat error: syscall returned with %d (%s)\n",
						fd[cpu][counter], strerror(errno));
				exit(-1);
			}
		}
	} else {
		hw_event.inherit	= inherit;
		hw_event.disabled	= 1;

		fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
		if (fd[0][counter] < 0) {
			printf("perfstat error: syscall returned with %d (%s)\n",
					fd[0][counter], strerror(errno));
			exit(-1);
		}
	}
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
	if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK))
		return 1;
	if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
		return 1;

	return 0;
}

/*
 * Read out the results of a single counter:
 */
static void read_counter(int counter)
{
	__u64 *count, single_count[3];
	ssize_t res;
	int cpu, nv;
	int scaled;

	count = event_res[counter];

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;
	for (cpu = 0; cpu < nr_cpus; cpu ++) {
		res = read(fd[cpu][counter], single_count, nv * sizeof(__u64));
		assert(res == nv * sizeof(__u64));

		count[0] += single_count[0];
		if (scale) {
			count[1] += single_count[1];
			count[2] += single_count[2];
		}
	}

	scaled = 0;
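	/*
	 * Re-scale counters that ran for only part of their enabled time
	 * (time running < time enabled), rounding to the nearest count:
	 *
	 *   count = count * time_enabled / time_running
	 *
	 * A counter with zero running time never got onto the PMU at all;
	 * mark it so print_counter() reports it as "<not counted>".
	 */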
	if (scale) {
		if (count[2] == 0) {
			event_scaled[counter] = -1;
			count[0] = 0;
			return;
		}

		if (count[2] < count[1]) {
			event_scaled[counter] = 1;
			count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
		}
	}
	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
		runtime_nsecs = count[0];
}

/*
 * Print out the results of a single counter:
 */
static void print_counter(int counter)
{
	__u64 *count;
	int scaled;

	count = event_res[counter];
	scaled = event_scaled[counter];

	if (scaled == -1) {
		fprintf(stderr, " %14s  %-20s\n",
			"<not counted>", event_name(counter));
		return;
	}
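	/*
	 * Time-based counters are printed as milliseconds; all other
	 * counters as a raw count plus an event rate in millions per
	 * second, normalized to the measured task-clock runtime.
	 */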

	if (nsec_counter(counter)) {
		double msecs = (double)count[0] / 1000000;

		fprintf(stderr, " %14.6f  %-20s (msecs)",
			msecs, event_name(counter));
	} else {
		fprintf(stderr, " %14Ld  %-20s",
			count[0], event_name(counter));
		if (runtime_nsecs)
			fprintf(stderr, " # %12.3f M/sec",
				(double)count[0]/runtime_nsecs*1000.0);
	}
	if (scaled)
		fprintf(stderr, "  (scaled from %.2f%%)",
			(double) count[2] / count[1] * 100);
	fprintf(stderr, "\n");
}

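/*
 * Create all counters, enable them via prctl() around a fork()+exec()
 * of the workload, then read them back and print the results together
 * with the elapsed wall-clock time.
 */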
static int do_perfstat(int argc, const char **argv)
{
	unsigned long long t0, t1;
	int counter;
	int status;
	int pid;

	if (!system_wide)
		nr_cpus = 1;

	for (counter = 0; counter < nr_counters; counter++)
		create_perfstat_counter(counter);

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	prctl(PR_TASK_PERF_COUNTERS_ENABLE);

	if ((pid = fork()) < 0)
		perror("failed to fork");
	if (!pid) {
		if (execvp(argv[0], (char **)argv)) {
			perror(argv[0]);
			exit(-1);
		}
	}
	while (wait(&status) >= 0)
		;
	prctl(PR_TASK_PERF_COUNTERS_DISABLE);
	t1 = rdclock();

	fflush(stdout);

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for \'%s\':\n",
		argv[0]);
	fprintf(stderr, "\n");

	for (counter = 0; counter < nr_counters; counter++)
		read_counter(counter);

	for (counter = 0; counter < nr_counters; counter++)
		print_counter(counter);


	fprintf(stderr, "\n");
	fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n",
			(double)(t1-t0)/1e6);
	fprintf(stderr, "\n");

	return 0;
}

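/*
 * Intentionally empty handler: the signal is not blocked (so the
 * exec()-ed workload still receives Ctrl-C), it is merely ignored by
 * perf stat itself - see the signal() setup in cmd_stat().
 */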
static void skip_signal(int signo)
{
}

static const char * const stat_usage[] = {
	"perf stat [<options>] <command>",
	NULL
};

static char events_help_msg[EVENTS_HELP_MAX];

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     events_help_msg, parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('l', "scale", &scale,
			    "scale/normalize counters"),
	OPT_END()
};

int cmd_stat(int argc, const char **argv, const char *prefix)
{
	int counter;

	page_size = sysconf(_SC_PAGE_SIZE);

	create_events_help(events_help_msg);
	memcpy(event_id, default_event_id, sizeof(default_event_id));

	argc = parse_options(argc, argv, options, stat_usage, 0);
	if (!argc)
		usage_with_options(stat_usage, options);

	if (!nr_counters)
		nr_counters = 8;

	for (counter = 0; counter < nr_counters; counter++) {
		if (event_count[counter])
			continue;

		event_count[counter] = default_interval;
	}
	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	return do_perfstat(argc, argv);
}