/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
8
#include "builtin.h"
9 10 11

#include "perf.h"

12
#include "util/build-id.h"
13
#include "util/util.h"
14
#include "util/parse-options.h"
15
#include "util/parse-events.h"
16

17
#include "util/callchain.h"
18
#include "util/header.h"
19
#include "util/event.h"
20
#include "util/evlist.h"
21
#include "util/evsel.h"
22
#include "util/debug.h"
23
#include "util/session.h"
24
#include "util/tool.h"
25
#include "util/symbol.h"
26
#include "util/cpumap.h"
27
#include "util/thread_map.h"
28
#include "util/data.h"
29

30
#include <unistd.h>
31
#include <sched.h>
32
#include <sys/mman.h>
33

34

35
struct record {
36
	struct perf_tool	tool;
37
	struct record_opts	opts;
38
	u64			bytes_written;
39
	struct perf_data_file	file;
40 41 42 43 44 45 46
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
47
};
48

49
static int record__write(struct record *rec, void *bf, size_t size)
50
{
51
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
52 53
		pr_err("failed to write perf data, error: %m\n");
		return -1;
54
	}
55

56
	rec->bytes_written += size;
57
	return 0;
58 59
}

60
/*
 * perf_tool callback: forward a synthesized event straight to the output
 * file.  sample/machine are unused because synthesized events are written
 * verbatim.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

69
static int record__mmap_read(struct record *rec, int idx)
70
{
71
	struct perf_mmap *md = &rec->evlist->mmap[idx];
72
	unsigned int head = perf_mmap__read_head(md);
73
	unsigned int old = md->prev;
J
Jiri Olsa 已提交
74
	unsigned char *data = md->base + page_size;
75 76
	unsigned long size;
	void *buf;
77
	int rc = 0;
78

79
	if (old == head)
80
		return 0;
81

82
	rec->samples++;
83 84 85 86 87 88 89

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;
90

91
		if (record__write(rec, buf, size) < 0) {
92 93 94
			rc = -1;
			goto out;
		}
95 96 97 98 99
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;
100

101
	if (record__write(rec, buf, size) < 0) {
102 103 104
		rc = -1;
		goto out;
	}
105 106

	md->prev = old;
107
	perf_evlist__mmap_consume(rec->evlist, idx);
108 109
out:
	return rc;
110 111 112
}

/* Signal-driven state shared with the record loop in __cmd_record(). */
static volatile int done = 0;		/* set when recording should stop */
static volatile int signr = -1;		/* terminating signal to re-raise at exit */
static volatile int child_finished = 0;	/* SIGCHLD seen: workload exited */

/*
 * Handle SIGCHLD/SIGINT/SIGTERM: note which signal arrived and ask the main
 * loop to wind down.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

126
static void record__sig_exit(void)
127
{
128
	if (signr == -1)
129 130 131
		return;

	signal(signr, SIG_DFL);
132
	raise(signr);
133 134
}

135
static int record__open(struct record *rec)
136
{
137
	char msg[512];
138
	struct perf_evsel *pos;
139 140
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
141
	struct record_opts *opts = &rec->opts;
142
	int rc = 0;
143

144
	perf_evlist__config(evlist, opts);
145

146
	evlist__for_each(evlist, pos) {
147
try_again:
148
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
149
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
150
				if (verbose)
151
					ui__warning("%s\n", msg);
152 153
				goto try_again;
			}
154

155 156 157 158
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
159
			goto out;
L
Li Zefan 已提交
160 161
		}
	}
162

163
	if (perf_evlist__apply_filters(evlist)) {
164
		error("failed to set filter with %d (%s)\n", errno,
165
			strerror_r(errno, msg, sizeof(msg)));
166 167
		rc = -1;
		goto out;
168 169
	}

170
	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
171 172 173 174 175
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
176
			       "(current value: %u)\n", opts->mmap_pages);
177 178
			rc = -errno;
		} else {
179 180
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
181 182 183
			rc = -errno;
		}
		goto out;
184
	}
185

186
	session->evlist = evlist;
187
	perf_session__set_id_hdr_size(session);
188 189
out:
	return rc;
190 191
}

192
static int process_buildids(struct record *rec)
193
{
194 195
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
196
	u64 start = session->header.data_offset;
197

198
	u64 size = lseek(file->fd, 0, SEEK_CUR);
199 200 201
	if (size == 0)
		return 0;

202 203
	return __perf_session__process_events(session, start,
					      size - start,
204 205 206
					      size, &build_id__mark_dso_hit_ops);
}

207
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
208 209
{
	int err;
210
	struct perf_tool *tool = data;
211 212 213 214 215 216 217 218
	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
219
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
220
					     machine);
221 222
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
223
		       " relocation symbol.\n", machine->pid);
224 225 226 227 228

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
229
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
230
						 machine);
231 232
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
233
		       " relocation symbol.\n", machine->pid);
234 235
}

236 237 238 239 240
/* Marker event flushed after each round of mmap reads that produced data. */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

241
static int record__mmap_read_all(struct record *rec)
242
{
243
	u64 bytes_written = rec->bytes_written;
244
	int i;
245
	int rc = 0;
246

247
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
248
		if (rec->evlist->mmap[i].base) {
249
			if (record__mmap_read(rec, i) != 0) {
250 251 252 253
				rc = -1;
				goto out;
			}
		}
254 255
	}

256 257 258 259 260 261
	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
262 263 264

out:
	return rc;
265 266
}

267
static void record__init_features(struct record *rec)
268 269 270 271 272 273 274 275 276 277
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

278
	if (!have_tracepoints(&rec->evlist->entries))
279 280 281 282 283 284
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

285 286 287 288 289 290 291
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
292 293
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
294 295 296 297 298 299 300
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

301
static int __cmd_record(struct record *rec, int argc, const char **argv)
302
{
303
	int err;
304
	int status = 0;
305
	unsigned long waking = 0;
306
	const bool forks = argc > 0;
307
	struct machine *machine;
308
	struct perf_tool *tool = &rec->tool;
309
	struct record_opts *opts = &rec->opts;
310
	struct perf_data_file *file = &rec->file;
311
	struct perf_session *session;
312
	bool disabled = false, draining = false;
313

314
	rec->progname = argv[0];
315

316
	atexit(record__sig_exit);
317 318
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
319
	signal(SIGTERM, sig_handler);
320

321
	session = perf_session__new(file, false, NULL);
322
	if (session == NULL) {
A
Adrien BAK 已提交
323
		pr_err("Perf session creation failed.\n");
324 325 326
		return -1;
	}

327 328
	rec->session = session;

329
	record__init_features(rec);
330

331
	if (forks) {
332
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
333
						    argv, file->is_pipe,
334
						    workload_exec_failed_signal);
335 336
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
337
			status = err;
338
			goto out_delete_session;
339 340 341
		}
	}

342
	if (record__open(rec) != 0) {
343
		err = -1;
344
		goto out_child;
345
	}
346

347
	if (!rec->evlist->nr_groups)
348 349
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

350 351
	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
352
		if (err < 0)
353
			goto out_child;
354
	} else {
355
		err = perf_session__write_header(session, rec->evlist,
356
						 file->fd, false);
357
		if (err < 0)
358
			goto out_child;
359 360
	}

361
	if (!rec->no_buildid
362
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
363
		pr_err("Couldn't generate buildids. "
364
		       "Use --no-buildid to profile anyway.\n");
365
		err = -1;
366
		goto out_child;
367 368
	}

369
	machine = &session->machines.host;
370

371
	if (file->is_pipe) {
372
		err = perf_event__synthesize_attrs(tool, session,
373
						   process_synthesized_event);
374 375
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
376
			goto out_child;
377
		}
378

379
		if (have_tracepoints(&rec->evlist->entries)) {
380 381 382 383 384 385 386 387
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
388
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
389
								  process_synthesized_event);
390 391
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
392
				goto out_child;
393
			}
394
			rec->bytes_written += err;
395
		}
396 397
	}

398
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
399
						 machine);
400 401 402 403
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");
404

405
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
406
					     machine);
407 408 409 410 411
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

412
	if (perf_guest) {
413 414
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
415
	}
416

417
	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
418
					    process_synthesized_event, opts->sample_address);
419
	if (err != 0)
420
		goto out_child;
421

422
	if (rec->realtime_prio) {
423 424
		struct sched_param param;

425
		param.sched_priority = rec->realtime_prio;
426
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
427
			pr_err("Could not set realtime priority.\n");
428
			err = -1;
429
			goto out_child;
430 431 432
		}
	}

433 434 435 436 437
	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
438
	if (!target__none(&opts->target) && !opts->initial_delay)
439
		perf_evlist__enable(rec->evlist);
440

441 442 443
	/*
	 * Let the child rip
	 */
444
	if (forks)
445
		perf_evlist__start_workload(rec->evlist);
446

447 448 449 450 451
	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

452
	for (;;) {
453
		int hits = rec->samples;
454

455
		if (record__mmap_read_all(rec) < 0) {
456
			err = -1;
457
			goto out_child;
458
		}
459

460
		if (hits == rec->samples) {
461
			if (done || draining)
462
				break;
463
			err = perf_evlist__poll(rec->evlist, -1);
464 465 466 467 468
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
469
				err = 0;
470
			waking++;
471 472 473

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
474 475
		}

476 477 478 479 480
		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
481
		if (done && !disabled && !target__none(&opts->target)) {
482
			perf_evlist__disable(rec->evlist);
483 484
			disabled = true;
		}
485 486
	}

487
	if (forks && workload_exec_errno) {
488
		char msg[STRERR_BUFSIZE];
489 490 491
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
492
		goto out_child;
493 494
	}

495 496
	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
497

498 499 500 501 502 503 504 505 506
		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}
507

508 509 510
out_child:
	if (forks) {
		int exit_status;
511

512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}
534 535 536

out_delete_session:
	perf_session__delete(session);
537
	return status;
538
}
539

540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
558 559 560
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
561
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
562 563 564 565
	BRANCH_END
};

static int
566
parse_branch_stack(const struct option *opt, const char *str, int unset)
567 568 569 570 571 572 573 574
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
575
	char *s, *os = NULL, *p;
576 577
	int ret = -1;

578 579
	if (unset)
		return 0;
580

581 582 583 584
	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
585 586
		return -1;

587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607
	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}
608

609
			*mode |= br->mode;
610

611 612
			if (!p)
				break;
613

614 615
			s = p + 1;
		}
616 617 618
	}
	ret = 0;

619
	/* default to any branch */
620
	if ((*mode & ~ONLY_PLM) == 0) {
621
		*mode = PERF_SAMPLE_BRANCH_ANY;
622 623 624 625 626 627
	}
error:
	free(os);
	return ret;
}

628
static void callchain_debug(void)
J
Jiri Olsa 已提交
629
{
630 631
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

632
	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
633

634
	if (callchain_param.record_mode == CALLCHAIN_DWARF)
J
Jiri Olsa 已提交
635
		pr_debug("callchain: stack dump size %d\n",
636
			 callchain_param.dump_size);
J
Jiri Olsa 已提交
637 638
}

639
/*
 * Option callback for --call-graph[=mode[,dump_size]]: enable or disable
 * callchain recording and parse the mode argument.  Shared with
 * builtin-script, hence non-static.
 */
int record_parse_callchain_opt(const struct option *opt __maybe_unused,
			       const char *arg,
			       int unset)
{
	int ret;

	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg);
	if (!ret)
		callchain_debug();

	return ret;
}

661
/*
 * Option callback for bare -g: enable callchain recording with the default
 * frame-pointer mode unless a mode was already chosen via --call-graph.
 */
int record_callchain_opt(const struct option *opt __maybe_unused,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

674 675 676
/*
 * perf_config() callback: alias the legacy record.call-graph config key to
 * call-graph.record-mode, then defer to the default handler.
 */
static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

682
static const char * const record_usage[] = {
683 684
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
685 686 687
	NULL
};

688
/*
689 690
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
691 692 693 694 695 696 697
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
698
static struct record record = {
699
	.opts = {
700
		.sample_time	     = true,
701 702 703
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
704
		.freq		     = 4000,
N
Namhyung Kim 已提交
705 706
		.target		     = {
			.uses_mmap   = true,
707
			.default_per_cpu = true,
N
Namhyung Kim 已提交
708
		},
709 710
	},
};
711

J
Jiri Olsa 已提交
712
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
713

714
#ifdef HAVE_DWARF_UNWIND_SUPPORT
J
Jiri Olsa 已提交
715
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
716
#else
J
Jiri Olsa 已提交
717
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
718 719
#endif

720 721 722
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
723
 * from builtin-record.c, i.e. use record_opts,
724 725 726
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
727
const struct option record_options[] = {
728
	OPT_CALLBACK('e', "event", &record.evlist, "event",
729
		     "event selector. use 'perf list' to list available events",
730
		     parse_events_option),
731
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
L
Li Zefan 已提交
732
		     "event filter", parse_filter),
733
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
734
		    "record events on existing process id"),
735
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
736
		    "record events on existing thread id"),
737
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
738
		    "collect data with this RT SCHED_FIFO priority"),
739
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
740
		    "collect data without buffering"),
741
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
742
		    "collect raw sample records from all opened counters"),
743
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
744
			    "system-wide collection from all CPUs"),
745
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
746
		    "list of cpus to monitor"),
747
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
748
	OPT_STRING('o', "output", &record.file.path, "file",
I
Ingo Molnar 已提交
749
		    "output file name"),
750 751 752
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
753
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
754 755 756
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
757
	OPT_BOOLEAN(0, "group", &record.opts.group,
758
		    "put the counters into a counter group"),
J
Jiri Olsa 已提交
759 760 761 762 763 764
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
765
	OPT_INCR('v', "verbose", &verbose,
766
		    "be more verbose (show counter open errors, etc)"),
767
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
768
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
769
		    "per thread counts"),
770
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
771
		    "Sample addresses"),
772
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
773
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
774
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
775
		    "don't sample"),
776
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
777
		    "do not update the buildid cache"),
778
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
779
		    "do not collect buildids in perf.data"),
780
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
S
Stephane Eranian 已提交
781 782
		     "monitor event in cgroup name only",
		     parse_cgroups),
783
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
784
		  "ms to wait before starting measurement after program start"),
785 786
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),
787 788 789 790 791 792 793

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
794
		     parse_branch_stack),
795 796
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
797 798
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
799 800
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
801 802 803
	OPT_END()
};

804
/*
 * Entry point for 'perf record': parse options, validate the target
 * (pid/tid/cpu/uid), set up the default event and maps, then hand off to
 * __cmd_record().  Returns 0 or a negative error code.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* No events given: fall back to the default cycles event. */
	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}