/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

33 34 35 36 37
/* How an existing perf.data output file is treated: clobbered or appended to. */
enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
/*
 * All state for one 'perf record' invocation: the event-processing
 * callbacks, the user-supplied options, and the bookkeeping for the
 * output file being written.
 */
struct perf_record {
	struct perf_event_ops	ops;			/* callbacks for synthesized events */
	struct perf_record_opts	opts;			/* user-visible recording options */
	u64			bytes_written;		/* payload bytes written so far */
	const char		*output_name;		/* output path (NULL => decide later) */
	struct perf_evlist	*evlist;		/* events being recorded */
	struct perf_session	*session;		/* session bound to the output file */
	const char		*progname;		/* argv[0], used by psignal() */
	int			output;			/* output file descriptor */
	unsigned int		page_size;		/* runtime page size for mmap math */
	int			realtime_prio;		/* nonzero => SCHED_FIFO priority */
	enum write_mode_t	write_mode;		/* overwrite vs. append */
	bool			no_buildid;		/* skip build-id post-processing */
	bool			no_buildid_cache;	/* don't update the buildid cache */
	bool			force;			/* -f: overwriting allowed */
	bool			file_new;		/* output file did not pre-exist */
	bool			append_file;		/* -A: append was requested */
	long			samples;		/* samples seen since last poll */
	off_t			post_processing_offset;	/* file offset where event data starts */
};
58

59
static void advance_output(struct perf_record *rec, size_t size)
60
{
61
	rec->bytes_written += size;
62 63
}

64
static void write_output(struct perf_record *rec, void *buf, size_t size)
65 66
{
	while (size) {
67
		int ret = write(rec->output, buf, size);
68 69 70 71 72 73 74

		if (ret < 0)
			die("failed to write");

		size -= ret;
		buf += ret;

75
		rec->bytes_written += ret;
76 77 78
	}
}

79 80
static int process_synthesized_event(struct perf_event_ops *ops,
				     union perf_event *event,
81
				     struct perf_sample *sample __used,
82
				     struct perf_session *self __used)
83
{
84 85
	struct perf_record *rec = container_of(ops, struct perf_record, ops);
	write_output(rec, event, event->header.size);
86 87 88
	return 0;
}

89 90
static void perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
91
{
92
	unsigned int head = perf_mmap__read_head(md);
93
	unsigned int old = md->prev;
94
	unsigned char *data = md->base + rec->page_size;
95 96 97
	unsigned long size;
	void *buf;

98 99 100
	if (old == head)
		return;

101
	rec->samples++;
102 103 104 105 106 107 108

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;
109

110
		write_output(rec, buf, size);
111 112 113 114 115
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;
116

117
	write_output(rec, buf, size);
118 119

	md->prev = old;
120
	perf_mmap__write_tail(md, old);
121 122 123
}

static volatile int done = 0;		/* main loop exit flag */
static volatile int signr = -1;		/* last signal caught, for re-raise */
static volatile int child_finished = 0;	/* workload child has exited */

/*
 * Async-signal handler: just record what happened; the main loop and the
 * exit handlers do the actual work.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

136
/*
 * on_exit() handler: terminate/reap the workload child if it is still
 * running, then re-raise the fatal signal (if any) with its default
 * disposition so our exit status reflects it.
 */
static void perf_record__sig_exit(int exit_status __used, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	/* SIGUSR1 is a "stop recording" request, not a fatal condition. */
	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
/*
 * Check whether two event lists describe exactly the same set of events,
 * i.e. same number of entries and pairwise byte-identical perf_event_attr.
 * Used to validate that an append (-A) is compatible with the existing file.
 */
static bool perf_evlist__equal(struct perf_evlist *evlist,
			       struct perf_evlist *other)
{
	struct perf_evsel *pos, *pair;

	if (evlist->nr_entries != other->nr_entries)
		return false;

	pair = list_entry(other->entries.next, struct perf_evsel, node);

	list_for_each_entry(pos, &evlist->entries, node) {
		/*
		 * Bug fix: the "!= 0" used to sit *inside* sizeof(), so the
		 * comparison length evaluated to (sizeof(...) != 0) == 1 and
		 * only the first byte of each attr was ever compared.
		 */
		if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr)) != 0)
			return false;
		pair = list_entry(pair->node.next, struct perf_evsel, node);
	}

	return true;
}

176
static void perf_record__open(struct perf_record *rec)
177
{
178
	struct perf_evsel *pos, *first;
179 180 181
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
182

183 184
	first = list_entry(evlist->entries.next, struct perf_evsel, node);

185
	perf_evlist__config_attrs(evlist, opts);
186

187 188
	list_for_each_entry(pos, &evlist->entries, node) {
		struct perf_event_attr *attr = &pos->attr;
189
		struct xyarray *group_fd = NULL;
190 191 192 193 194 195 196 197 198 199 200 201 202
		/*
		 * Check if parse_single_tracepoint_event has already asked for
		 * PERF_SAMPLE_TIME.
		 *
		 * XXX this is kludgy but short term fix for problems introduced by
		 * eac23d1c that broke 'perf script' by having different sample_types
		 * when using multiple tracepoint events when we use a perf binary
		 * that tries to use sample_id_all on an older kernel.
		 *
		 * We need to move counter creation to perf_session, support
		 * different sample_types, etc.
		 */
		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
203

204
		if (opts->group && pos != first)
205
			group_fd = first->fd;
206
retry_sample_id:
207
		attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
208
try_again:
209
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
210
				     opts->group, group_fd) < 0) {
211 212
			int err = errno;

213
			if (err == EPERM || err == EACCES) {
214
				ui__error_paranoid();
215
				exit(EXIT_FAILURE);
216
			} else if (err ==  ENODEV && opts->cpu_list) {
217 218
				die("No such device - did you specify"
					" an out-of-range profile CPU?\n");
219
			} else if (err == EINVAL && opts->sample_id_all_avail) {
220 221 222
				/*
				 * Old kernel, no attr->sample_id_type_all field
				 */
223 224
				opts->sample_id_all_avail = false;
				if (!opts->sample_time && !opts->raw_samples && !time_needed)
225 226
					attr->sample_type &= ~PERF_SAMPLE_TIME;

227
				goto retry_sample_id;
228
			}
229

230 231 232 233 234 235 236 237 238
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
239 240
					ui__warning("The cycles event is not supported, "
						    "trying to fall back to cpu-clock-ticks\n");
241 242 243 244
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
245 246 247 248 249 250 251

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(pos));
				exit(EXIT_FAILURE);
			}

252
			printf("\n");
253
			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
254
			      err, strerror(err));
255 256

#if defined(__i386__) || defined(__x86_64__)
257 258 259 260 261
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
262 263
#endif

264
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
L
Li Zefan 已提交
265 266
		}
	}
267

268 269 270 271 272 273
	if (perf_evlist__set_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		exit(-1);
	}

274
	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0)
275 276
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));

277
	if (rec->file_new)
278 279 280 281 282 283 284 285 286
		session->evlist = evlist;
	else {
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			exit(-1);
		}
 	}

	perf_session__update_sample_type(session);
287 288
}

289
static int process_buildids(struct perf_record *rec)
290
{
291
	u64 size = lseek(rec->output, 0, SEEK_CUR);
292

293 294 295
	if (size == 0)
		return 0;

296 297 298
	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
299 300 301
					      size, &build_id__mark_dso_hit_ops);
}

302
/*
 * on_exit() handler: finalize the perf.data file (build-ids + header
 * rewrite with the real data size) and release the session/evlist.
 * Nothing to finalize when we streamed to a pipe.
 */
static void perf_record__exit(int status __used, void *arg)
{
	struct perf_record *rec = arg;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

319
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
320 321
{
	int err;
322 323 324
	struct perf_event_ops *ops = data;
	struct perf_record *rec = container_of(ops, struct perf_record, ops);
	struct perf_session *psession = rec->session;
325

326
	if (machine__is_host(machine))
327 328 329 330 331 332 333 334 335 336
		return;

	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
337
	err = perf_event__synthesize_modules(ops, process_synthesized_event,
338
					     psession, machine);
339 340
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
341
		       " relocation symbol.\n", machine->pid);
342 343 344 345 346

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
347
	err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
348
						 psession, machine, "_text");
349
	if (err < 0)
350
		err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
351 352
							 psession, machine,
							 "_stext");
353 354
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
355
		       " relocation symbol.\n", machine->pid);
356 357
}

358 359 360 361 362
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

363
static void perf_record__mmap_read_all(struct perf_record *rec)
364
{
365
	int i;
366

367 368 369
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base)
			perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
370 371
	}

372 373
	if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO))
		write_output(rec, &finished_round_event, sizeof(finished_round_event));
374 375
}

376
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
377
{
I
Ingo Molnar 已提交
378 379
	struct stat st;
	int flags;
380
	int err, output;
381
	unsigned long waking = 0;
382
	const bool forks = argc > 0;
383
	struct machine *machine;
384 385 386 387 388
	struct perf_event_ops *ops = &rec->ops;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;
389

390
	rec->progname = argv[0];
391

392
	rec->page_size = sysconf(_SC_PAGE_SIZE);
393

394
	on_exit(perf_record__sig_exit, rec);
395 396
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
397
	signal(SIGUSR1, sig_handler);
398

399 400
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
401
			opts->pipe_output = true;
402
		else
403
			rec->output_name = output_name = "perf.data";
404 405 406
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
407
			opts->pipe_output = true;
408
		else if (!stat(output_name, &st) && st.st_size) {
409
			if (rec->write_mode == WRITE_FORCE) {
410 411 412 413 414 415
				char oldname[PATH_MAX];
				snprintf(oldname, sizeof(oldname), "%s.old",
					 output_name);
				unlink(oldname);
				rename(output_name, oldname);
			}
416 417
		} else if (rec->write_mode == WRITE_APPEND) {
			rec->write_mode = WRITE_FORCE;
418
		}
419 420
	}

421
	flags = O_CREAT|O_RDWR;
422 423
	if (rec->write_mode == WRITE_APPEND)
		rec->file_new = 0;
I
Ingo Molnar 已提交
424 425 426
	else
		flags |= O_TRUNC;

427
	if (opts->pipe_output)
428 429 430
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
431 432 433 434 435
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}

436 437
	rec->output = output;

438
	session = perf_session__new(output_name, O_WRONLY,
439
				    rec->write_mode == WRITE_FORCE, false, NULL);
440
	if (session == NULL) {
441 442 443 444
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

445 446 447
	rec->session = session;

	if (!rec->no_buildid)
448 449
		perf_header__set_feat(&session->header, HEADER_BUILD_ID);

450
	if (!rec->file_new) {
451
		err = perf_session__read_header(session, output);
452
		if (err < 0)
453
			goto out_delete_session;
454 455
	}

456
	if (have_tracepoints(&evsel_list->entries))
457
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
458

459 460 461 462 463 464 465 466 467 468 469 470 471
	perf_header__set_feat(&session->header, HEADER_HOSTNAME);
	perf_header__set_feat(&session->header, HEADER_OSRELEASE);
	perf_header__set_feat(&session->header, HEADER_ARCH);
	perf_header__set_feat(&session->header, HEADER_CPUDESC);
	perf_header__set_feat(&session->header, HEADER_NRCPUS);
	perf_header__set_feat(&session->header, HEADER_EVENT_DESC);
	perf_header__set_feat(&session->header, HEADER_CMDLINE);
	perf_header__set_feat(&session->header, HEADER_VERSION);
	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
	perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
	perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
	perf_header__set_feat(&session->header, HEADER_CPUID);

472
	if (forks) {
473
		err = perf_evlist__prepare_workload(evsel_list, opts, argv);
474 475 476
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
477 478 479
		}
	}

480
	perf_record__open(rec);
481

482
	/*
483
	 * perf_session__delete(session) will be called at perf_record__exit()
484
	 */
485
	on_exit(perf_record__exit, rec);
486

487
	if (opts->pipe_output) {
488 489 490
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
491
	} else if (rec->file_new) {
492 493
		err = perf_session__write_header(session, evsel_list,
						 output, false);
494 495
		if (err < 0)
			return err;
496 497
	}

498
	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
499

500 501 502
	if (opts->pipe_output) {
		err = perf_event__synthesize_attrs(ops, session,
						   process_synthesized_event);
503 504 505 506
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
507

508
		err = perf_event__synthesize_event_types(ops, process_synthesized_event,
509
							 session);
510 511 512 513
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}
514

515
		if (have_tracepoints(&evsel_list->entries)) {
516 517 518 519 520 521 522 523
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
524
			err = perf_event__synthesize_tracing_data(ops, output, evsel_list,
525 526
								  process_synthesized_event,
								  session);
527 528 529 530
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				return err;
			}
531
			advance_output(rec, err);
532
		}
533 534
	}

535 536
	machine = perf_session__find_host_machine(session);
	if (!machine) {
537 538 539 540
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

541
	err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
542
						 session, machine, "_text");
543
	if (err < 0)
544
		err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
545
							 session, machine, "_stext");
546 547 548 549
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");
550

551
	err = perf_event__synthesize_modules(ops, process_synthesized_event,
552
					     session, machine);
553 554 555 556 557
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

558
	if (perf_guest)
559
		perf_session__process_machines(session, ops,
560
					       perf_event__synthesize_guest_os);
561

562 563
	if (!opts->system_wide)
		perf_event__synthesize_thread_map(ops, evsel_list->threads,
564 565
						  process_synthesized_event,
						  session);
566
	else
567
		perf_event__synthesize_threads(ops, process_synthesized_event,
568
					       session);
569

570
	if (rec->realtime_prio) {
571 572
		struct sched_param param;

573
		param.sched_priority = rec->realtime_prio;
574
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
575
			pr_err("Could not set realtime priority.\n");
576 577 578 579
			exit(-1);
		}
	}

580 581
	perf_evlist__enable(evsel_list);

582 583 584
	/*
	 * Let the child rip
	 */
585
	if (forks)
586
		perf_evlist__start_workload(evsel_list);
587

588
	for (;;) {
589
		int hits = rec->samples;
590

591
		perf_record__mmap_read_all(rec);
592

593
		if (hits == rec->samples) {
594 595
			if (done)
				break;
596
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
597 598 599
			waking++;
		}

600 601
		if (done)
			perf_evlist__disable(evsel_list);
602 603
	}

604
	if (quiet || signr == SIGUSR1)
605 606
		return 0;

607 608
	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

609 610 611 612
	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
613
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
614
		(double)rec->bytes_written / 1024.0 / 1024.0,
615
		output_name,
616
		rec->bytes_written / 24);
617

618
	return 0;
619 620 621 622

out_delete_session:
	perf_session__delete(session);
	return err;
623
}
624 625

static const char * const record_usage[] = {
626 627
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
628 629 630
	NULL
};

631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653
/*
 * XXX Ideally this would be local to cmd_record() and passed to a
 * perf_record__new, because we need access to it in perf_record__exit,
 * which runs after cmd_record() returns. But record_options below must
 * stay reachable from builtin-script, so the state lives here as a global.
 *
 * At least no other function in this file touches it directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.target_pid	     = -1,	/* -1 => no pid/tid filter */
		.target_tid	     = -1,
		.mmap_pages	     = UINT_MAX,	/* "unset" sentinels ... */
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 1000,	/* default sampling frequency */
		.sample_id_all_avail = true,
	},
	.write_mode = WRITE_FORCE,
	.file_new   = true,
};
654

655 656 657 658 659 660 661
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
662
const struct option record_options[] = {
663
	OPT_CALLBACK('e', "event", &record.evlist, "event",
664
		     "event selector. use 'perf list' to list available events",
665
		     parse_events_option),
666
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
L
Li Zefan 已提交
667
		     "event filter", parse_filter),
668
	OPT_INTEGER('p', "pid", &record.opts.target_pid,
669
		    "record events on existing process id"),
670
	OPT_INTEGER('t', "tid", &record.opts.target_tid,
671
		    "record events on existing thread id"),
672
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
673
		    "collect data with this RT SCHED_FIFO priority"),
674
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
675
		    "collect data without buffering"),
676
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
677
		    "collect raw sample records from all opened counters"),
678
	OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
679
			    "system-wide collection from all CPUs"),
680
	OPT_BOOLEAN('A', "append", &record.append_file,
I
Ingo Molnar 已提交
681
			    "append to the output file to do incremental profiling"),
682
	OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
683
		    "list of cpus to monitor"),
684
	OPT_BOOLEAN('f', "force", &record.force,
685
			"overwrite existing data file (deprecated)"),
686 687
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
I
Ingo Molnar 已提交
688
		    "output file name"),
689
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
690
		    "child tasks do not inherit counters"),
691 692
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
693
		     "number of mmap data pages"),
694
	OPT_BOOLEAN(0, "group", &record.opts.group,
695
		    "put the counters into a counter group"),
696
	OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
697
		    "do call-graph (stack chain/backtrace) recording"),
698
	OPT_INCR('v', "verbose", &verbose,
699
		    "be more verbose (show counter open errors, etc)"),
700
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
701
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
702
		    "per thread counts"),
703
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
704
		    "Sample addresses"),
705 706
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
707
		    "don't sample"),
708
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
709
		    "do not update the buildid cache"),
710
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
711
		    "do not collect buildids in perf.data"),
712
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
S
Stephane Eranian 已提交
713 714
		     "monitor event in cgroup name only",
		     parse_cgroups),
715 716 717
	OPT_END()
};

718
/*
 * Entry point for 'perf record': parse options, validate option
 * combinations, build the evlist and its fd/poll arrays, resolve the
 * sampling frequency/period, then hand off to __cmd_record().
 */
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;

	perf_header__set_cmdline(argc, argv);

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* Need something to record: a command, a pid/tid, -a, or a cpu list. */
	if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 &&
		!rec->opts.system_wide && !rec->opts.cpu_list)
		usage_with_options(record_usage, record_options);

	if (rec->force && rec->append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
				" You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (rec->append_file) {
		rec->write_mode = WRITE_APPEND;
	} else {
		rec->write_mode = WRITE_FORCE;
	}

	if (nr_cgroups && !rec->opts.system_wide) {
		fprintf(stderr, "cgroup monitoring only available in"
			" system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* No -e given: fall back to the default event (cycles). */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target_pid != -1)
		rec->opts.target_tid = rec->opts.target_pid;

	if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
				     rec->opts.target_tid, rec->opts.cpu_list) < 0)
		usage_with_options(record_usage, record_options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
					 evsel_list->threads->nr) < 0)
			goto out_free_fd;
		if (perf_header__push_event(pos->attr.config, event_name(pos)))
			goto out_free_fd;
	}

	if (perf_evlist__alloc_pollfd(evsel_list) < 0)
		goto out_free_fd;

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}