/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

struct record {
37
	struct perf_tool	tool;
38
	struct record_opts	opts;
39
	u64			bytes_written;
40
	struct perf_data_file	file;
41 42 43 44 45 46 47
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
48
};
49

50
/*
 * Write @size bytes at @bf to the session's output file and account
 * them in rec->bytes_written.  Returns 0 on success, -1 on write error.
 */
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

/*
 * perf_tool callback: forward a synthesized event straight to the
 * output file (sample/machine context is not needed for raw writes).
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

static int record__mmap_read(struct record *rec, int idx)
71
{
72
	struct perf_mmap *md = &rec->evlist->mmap[idx];
73
	unsigned int head = perf_mmap__read_head(md);
74
	unsigned int old = md->prev;
J
Jiri Olsa 已提交
75
	unsigned char *data = md->base + page_size;
76 77
	unsigned long size;
	void *buf;
78
	int rc = 0;
79

80
	if (old == head)
81
		return 0;
82

83
	rec->samples++;
84 85 86 87 88 89 90

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;
91

92
		if (record__write(rec, buf, size) < 0) {
93 94 95
			rc = -1;
			goto out;
		}
96 97 98 99 100
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;
101

102
	if (record__write(rec, buf, size) < 0) {
103 104 105
		rc = -1;
		goto out;
	}
106 107

	md->prev = old;
108
	perf_evlist__mmap_consume(rec->evlist, idx);
109 110
out:
	return rc;
111 112 113
}

/* Set from signal context; polled by the main record loop. */
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

/*
 * Common handler for SIGCHLD/SIGINT/SIGTERM: note which signal fired
 * (SIGCHLD means the workload exited) and ask the main loop to stop.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
128
{
129
	if (signr == -1)
130 131 132
		return;

	signal(signr, SIG_DFL);
133
	raise(signr);
134 135
}

136
static int record__open(struct record *rec)
137
{
138
	char msg[512];
139
	struct perf_evsel *pos;
140 141
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
142
	struct record_opts *opts = &rec->opts;
143
	int rc = 0;
144

145
	perf_evlist__config(evlist, opts);
146

147
	evlist__for_each(evlist, pos) {
148
try_again:
149
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
150
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
151
				if (verbose)
152
					ui__warning("%s\n", msg);
153 154
				goto try_again;
			}
155

156 157 158 159
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
160
			goto out;
L
Li Zefan 已提交
161 162
		}
	}
163

164
	if (perf_evlist__apply_filters(evlist)) {
165
		error("failed to set filter with %d (%s)\n", errno,
166
			strerror_r(errno, msg, sizeof(msg)));
167 168
		rc = -1;
		goto out;
169 170
	}

171
	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
172 173 174 175 176
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
177
			       "(current value: %u)\n", opts->mmap_pages);
178 179
			rc = -errno;
		} else {
180 181
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
182 183 184
			rc = -errno;
		}
		goto out;
185
	}
186

187
	session->evlist = evlist;
188
	perf_session__set_id_hdr_size(session);
189 190
out:
	return rc;
191 192
}

193
static int process_buildids(struct record *rec)
194
{
195 196
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
197
	u64 start = session->header.data_offset;
198

199
	u64 size = lseek(file->fd, 0, SEEK_CUR);
200 201 202
	if (size == 0)
		return 0;

203 204
	return __perf_session__process_events(session, start,
					      size - start,
205 206 207
					      size, &build_id__mark_dso_hit_ops);
}

208
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
209 210
{
	int err;
211
	struct perf_tool *tool = data;
212 213 214 215 216 217 218 219
	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
220
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
221
					     machine);
222 223
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
224
		       " relocation symbol.\n", machine->pid);
225 226 227 228 229

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
230
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
231
						 machine);
232 233
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
234
		       " relocation symbol.\n", machine->pid);
235 236
}

237 238 239 240 241
/* Header-only event marking the end of one round of mmap flushing. */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
243
{
244
	u64 bytes_written = rec->bytes_written;
245
	int i;
246
	int rc = 0;
247

248
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
249
		if (rec->evlist->mmap[i].base) {
250
			if (record__mmap_read(rec, i) != 0) {
251 252 253 254
				rc = -1;
				goto out;
			}
		}
255 256
	}

257 258 259 260 261 262
	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
263 264 265

out:
	return rc;
266 267
}

268
static void record__init_features(struct record *rec)
269 270 271 272 273 274 275 276 277 278
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

279
	if (!have_tracepoints(&rec->evlist->entries))
280 281 282 283 284 285
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

286 287 288 289 290 291 292
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
293 294
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
295 296 297 298 299 300 301
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

302
static int __cmd_record(struct record *rec, int argc, const char **argv)
303
{
304
	int err;
305
	int status = 0;
306
	unsigned long waking = 0;
307
	const bool forks = argc > 0;
308
	struct machine *machine;
309
	struct perf_tool *tool = &rec->tool;
310
	struct record_opts *opts = &rec->opts;
311
	struct perf_data_file *file = &rec->file;
312
	struct perf_session *session;
313
	bool disabled = false, draining = false;
314

315
	rec->progname = argv[0];
316

317
	atexit(record__sig_exit);
318 319
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
320
	signal(SIGTERM, sig_handler);
321

322
	session = perf_session__new(file, false, NULL);
323
	if (session == NULL) {
A
Adrien BAK 已提交
324
		pr_err("Perf session creation failed.\n");
325 326 327
		return -1;
	}

328 329
	rec->session = session;

330
	record__init_features(rec);
331

332
	if (forks) {
333
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
334
						    argv, file->is_pipe,
335
						    workload_exec_failed_signal);
336 337
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
338
			status = err;
339
			goto out_delete_session;
340 341 342
		}
	}

343
	if (record__open(rec) != 0) {
344
		err = -1;
345
		goto out_child;
346
	}
347

348
	if (!rec->evlist->nr_groups)
349 350
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

351 352
	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
353
		if (err < 0)
354
			goto out_child;
355
	} else {
356
		err = perf_session__write_header(session, rec->evlist,
357
						 file->fd, false);
358
		if (err < 0)
359
			goto out_child;
360 361
	}

362
	if (!rec->no_buildid
363
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
364
		pr_err("Couldn't generate buildids. "
365
		       "Use --no-buildid to profile anyway.\n");
366
		err = -1;
367
		goto out_child;
368 369
	}

370
	machine = &session->machines.host;
371

372
	if (file->is_pipe) {
373
		err = perf_event__synthesize_attrs(tool, session,
374
						   process_synthesized_event);
375 376
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
377
			goto out_child;
378
		}
379

380
		if (have_tracepoints(&rec->evlist->entries)) {
381 382 383 384 385 386 387 388
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
389
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
390
								  process_synthesized_event);
391 392
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
393
				goto out_child;
394
			}
395
			rec->bytes_written += err;
396
		}
397 398
	}

399
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
400
						 machine);
401 402 403 404
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");
405

406
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
407
					     machine);
408 409 410 411 412
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

413
	if (perf_guest) {
414 415
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
416
	}
417

418
	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
419
					    process_synthesized_event, opts->sample_address);
420
	if (err != 0)
421
		goto out_child;
422

423
	if (rec->realtime_prio) {
424 425
		struct sched_param param;

426
		param.sched_priority = rec->realtime_prio;
427
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
428
			pr_err("Could not set realtime priority.\n");
429
			err = -1;
430
			goto out_child;
431 432 433
		}
	}

434 435 436 437 438
	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
439
	if (!target__none(&opts->target) && !opts->initial_delay)
440
		perf_evlist__enable(rec->evlist);
441

442 443 444
	/*
	 * Let the child rip
	 */
445
	if (forks)
446
		perf_evlist__start_workload(rec->evlist);
447

448 449 450 451 452
	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

453
	for (;;) {
454
		int hits = rec->samples;
455

456
		if (record__mmap_read_all(rec) < 0) {
457
			err = -1;
458
			goto out_child;
459
		}
460

461
		if (hits == rec->samples) {
462
			if (done || draining)
463
				break;
464
			err = perf_evlist__poll(rec->evlist, -1);
465 466 467 468 469
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
470
				err = 0;
471
			waking++;
472 473 474

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
475 476
		}

477 478 479 480 481
		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
482
		if (done && !disabled && !target__none(&opts->target)) {
483
			perf_evlist__disable(rec->evlist);
484 485
			disabled = true;
		}
486 487
	}

488
	if (forks && workload_exec_errno) {
489
		char msg[STRERR_BUFSIZE];
490 491 492
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
493
		goto out_child;
494 495
	}

496 497
	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
498

499 500 501 502 503 504 505 506 507
		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}
508

509 510 511
out_child:
	if (forks) {
		int exit_status;
512

513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}
535 536 537

out_delete_session:
	perf_session__delete(session);
538
	return status;
539
}
540

541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
559 560 561
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
562
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
563 564 565 566
	BRANCH_END
};

static int
567
parse_branch_stack(const struct option *opt, const char *str, int unset)
568 569 570 571 572 573 574 575
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
576
	char *s, *os = NULL, *p;
577 578
	int ret = -1;

579 580
	if (unset)
		return 0;
581

582 583 584 585
	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
586 587
		return -1;

588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608
	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}
609

610
			*mode |= br->mode;
611

612 613
			if (!p)
				break;
614

615 616
			s = p + 1;
		}
617 618 619
	}
	ret = 0;

620
	/* default to any branch */
621
	if ((*mode & ~ONLY_PLM) == 0) {
622
		*mode = PERF_SAMPLE_BRANCH_ANY;
623 624 625 626 627 628
	}
error:
	free(os);
	return ret;
}

629
static void callchain_debug(void)
J
Jiri Olsa 已提交
630
{
631 632
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

633
	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
634

635
	if (callchain_param.record_mode == CALLCHAIN_DWARF)
J
Jiri Olsa 已提交
636
		pr_debug("callchain: stack dump size %d\n",
637
			 callchain_param.dump_size);
J
Jiri Olsa 已提交
638 639
}

640
/*
 * Option callback for --call-graph: parse the "mode[,dump_size]"
 * argument into callchain_param; --no-call-graph disables recording.
 */
int record_parse_callchain_opt(const struct option *opt __maybe_unused,
			       const char *arg,
			       int unset)
{
	int ret;

	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg);
	if (!ret)
		callchain_debug();

	return ret;
}

662
/*
 * Option callback for bare -g: enable call-graph recording with the
 * frame-pointer method unless a mode was already configured.
 */
int record_callchain_opt(const struct option *opt __maybe_unused,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

675 676 677
/*
 * perfconfig callback: alias the legacy "record.call-graph" key to
 * "call-graph.record-mode", then defer to the default handler.
 */
static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

683
static const char * const __record_usage[] = {
684 685
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
686 687
	NULL
};
688
const char * const *record_usage = __record_usage;
689

690
/*
691 692
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
693 694 695 696 697 698 699
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
700
static struct record record = {
701
	.opts = {
702
		.sample_time	     = true,
703 704 705
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
706
		.freq		     = 4000,
N
Namhyung Kim 已提交
707 708
		.target		     = {
			.uses_mmap   = true,
709
			.default_per_cpu = true,
N
Namhyung Kim 已提交
710
		},
711 712
	},
};
713

J
Jiri Olsa 已提交
714
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

/* Advertise "dwarf" only when libunwind/libdw support was built in. */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

722 723 724
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
725
 * from builtin-record.c, i.e. use record_opts,
726 727 728
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
729
struct option __record_options[] = {
730
	OPT_CALLBACK('e', "event", &record.evlist, "event",
731
		     "event selector. use 'perf list' to list available events",
732
		     parse_events_option),
733
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
L
Li Zefan 已提交
734
		     "event filter", parse_filter),
735
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
736
		    "record events on existing process id"),
737
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
738
		    "record events on existing thread id"),
739
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
740
		    "collect data with this RT SCHED_FIFO priority"),
741
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
742
		    "collect data without buffering"),
743
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
744
		    "collect raw sample records from all opened counters"),
745
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
746
			    "system-wide collection from all CPUs"),
747
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
748
		    "list of cpus to monitor"),
749
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
750
	OPT_STRING('o', "output", &record.file.path, "file",
I
Ingo Molnar 已提交
751
		    "output file name"),
752 753 754
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
755
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
756 757 758
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
759
	OPT_BOOLEAN(0, "group", &record.opts.group,
760
		    "put the counters into a counter group"),
J
Jiri Olsa 已提交
761 762 763 764 765 766
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
767
	OPT_INCR('v', "verbose", &verbose,
768
		    "be more verbose (show counter open errors, etc)"),
769
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
770
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
771
		    "per thread counts"),
772
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
773
		    "Sample addresses"),
774
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
775
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
776
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
777
		    "don't sample"),
778
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
779
		    "do not update the buildid cache"),
780
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
781
		    "do not collect buildids in perf.data"),
782
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
S
Stephane Eranian 已提交
783 784
		     "monitor event in cgroup name only",
		     parse_cgroups),
785
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
786
		  "ms to wait before starting measurement after program start"),
787 788
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),
789 790 791 792 793 794 795

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
796
		     parse_branch_stack),
797 798
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
799 800
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
801 802
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
803 804 805
	OPT_END()
};

806 807
struct option *record_options = __record_options;

808
/*
 * Entry point for 'perf record': parse options, validate the target
 * (pid/tid/cpu/uid), build the event list and thread/cpu maps, then
 * hand off to __cmd_record().  Returns 0 on success, negative on error.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;	/* ui__error may clobber errno */

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}