builtin-record.c 24.3 KB
Newer Older
I
Ingo Molnar 已提交
1
/*
2 3 4 5 6
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
I
Ingo Molnar 已提交
7
 */
8
#include "builtin.h"
9 10 11

#include "perf.h"

12
#include "util/build-id.h"
13
#include "util/util.h"
14
#include "util/parse-options.h"
15
#include "util/parse-events.h"
16

17
#include "util/header.h"
18
#include "util/event.h"
19
#include "util/evlist.h"
20
#include "util/evsel.h"
21
#include "util/debug.h"
22
#include "util/session.h"
23
#include "util/tool.h"
24
#include "util/symbol.h"
25
#include "util/cpumap.h"
26
#include "util/thread_map.h"
27
#include "util/data.h"
28

29
#include <unistd.h>
30
#include <sched.h>
31
#include <sys/mman.h>
32

33

34
/*
 * State for one 'perf record' invocation.
 */
struct record {
	struct perf_tool	tool;		/* callbacks for processing synthesized events */
	struct record_opts	opts;		/* recording options (freq, mmap pages, target, ...) */
	u64			bytes_written;	/* payload bytes written to the output so far */
	struct perf_data_file	file;		/* perf.data output (path, fd, is_pipe) */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;	/* session owning the header/feature state */
	const char		*progname;	/* argv[0] of the forked workload */
	int			realtime_prio;	/* nonzero: switch to SCHED_FIFO at this priority */
	bool			no_buildid;	/* -B: don't collect build-ids in perf.data */
	bool			no_buildid_cache; /* -N: don't update the build-id cache */
	long			samples;	/* mmap reads done; used to detect new data */
};
47

48
static int record__write(struct record *rec, void *bf, size_t size)
49
{
50
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
51 52
		pr_err("failed to write perf data, error: %m\n");
		return -1;
53
	}
54

55
	rec->bytes_written += size;
56
	return 0;
57 58
}

59
/*
 * perf_tool callback: funnel synthesized events straight into the data file.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	return record__write(container_of(tool, struct record, tool),
			     event, event->header.size);
}

68
/*
 * Drain one event ring buffer into the output file, advancing the tail so
 * the kernel can reuse the space.  Returns 0 on success, -1 on write error.
 */
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* Data starts one page past the base: page 0 is the control page. */
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	/*
	 * If the region wraps around the end of the ring buffer, write the
	 * chunk up to the end first, then fall through for the remainder.
	 */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* Write the (remaining) contiguous chunk. */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	/* Publish the new tail only after the data safely hit the file. */
	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

/* Set from sig_handler(); polled by the main loop in __cmd_record(). */
static volatile int done = 0;
/* Terminating signal number, re-raised by record__sig_exit() at exit. */
static volatile int signr = -1;
/* SIGCHLD seen: the forked workload has already exited. */
static volatile int child_finished = 0;
114

115
static void sig_handler(int sig)
116
{
117 118
	if (sig == SIGCHLD)
		child_finished = 1;
119 120
	else
		signr = sig;
121

122
	done = 1;
123 124
}

125
static void record__sig_exit(void)
126
{
127
	if (signr == -1)
128 129 130
		return;

	signal(signr, SIG_DFL);
131
	raise(signr);
132 133
}

134
/*
 * Configure, open and mmap all the counters in rec->evlist.  On success the
 * session takes over the evlist and its id header size is set up.  Returns
 * 0 on success, a negative errno-style value on failure.
 */
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			/*
			 * Let the evsel try a degraded config (e.g. dropping
			 * a modifier the kernel rejected) before giving up.
			 */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		/* EPERM here usually means the mlock budget was exceeded. */
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

190
static int process_buildids(struct record *rec)
191
{
192 193
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
194
	u64 start = session->header.data_offset;
195

196
	u64 size = lseek(file->fd, 0, SEEK_CUR);
197 198 199
	if (size == 0)
		return 0;

200 201
	return __perf_session__process_events(session, start,
					      size - start,
202 203 204
					      size, &build_id__mark_dso_hit_ops);
}

205
/*
 * machines__process_guests() callback: synthesize module and kernel mmap
 * events for one guest machine.  @data is the perf_tool in use.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

234 235 236 237 238
/*
 * Marker event flushed after each pass over the mmaps, letting 'perf report'
 * know it can safely reorder/process everything seen so far.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

239
static int record__mmap_read_all(struct record *rec)
240
{
241
	u64 bytes_written = rec->bytes_written;
242
	int i;
243
	int rc = 0;
244

245
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
246
		if (rec->evlist->mmap[i].base) {
247
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
248 249 250 251
				rc = -1;
				goto out;
			}
		}
252 253
	}

254 255 256 257 258 259
	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
260 261 262

out:
	return rc;
263 264
}

265
static void record__init_features(struct record *rec)
266 267 268 269 270 271 272 273 274 275
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

276
	if (!have_tracepoints(&rec->evlist->entries))
277 278 279 280 281 282
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

283 284 285 286 287 288 289
/* errno reported by a failed exec of the workload, 0 if none. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* The child queued its errno in the signal's value payload. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

299
/*
 * The core of 'perf record': create the session, fork the workload (if
 * any), open/mmap the counters, synthesize the initial metadata events
 * (attrs, kernel/module mmaps, existing threads), then loop draining the
 * ring buffers into perf.data until the workload exits or a terminating
 * signal arrives.  Returns the workload exit status or a negative error.
 */
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	/* Fork the workload stopped; it is kicked off further down. */
	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/* Pipe output gets a streaming header, files get a seekable one. */
	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	/*
	 * With a pipe there is no header to rewrite later, so the attrs and
	 * tracing data must be synthesized inline into the event stream.
	 */
	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	/* Synthesize mmap/comm events for the threads that already exist. */
	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		/* No new samples this pass: sleep until woken or done. */
		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[512];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* For regular files, finalize the header now that sizes are known. */
	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}
534

535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

/* One -j/--branch-filter keyword and its PERF_SAMPLE_BRANCH_* bit. */
struct branch_mode {
	const char *name;
	int mode;
};

/* Keywords accepted by parse_branch_stack(), terminated by BRANCH_END. */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

/*
 * Option callback for -b/-j: parse a comma separated list of branch_modes
 * keywords into the PERF_SAMPLE_BRANCH_* mask at opt->value.  Returns 0 on
 * success, -1 on an unknown keyword, an already-set mask or OOM.
 */
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			/* Split in place at the next comma, if any. */
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

623
#ifdef HAVE_DWARF_UNWIND_SUPPORT
/*
 * Parse the dump_size argument of "--call-graph dwarf,<size>".  The value
 * is rounded up to a multiple of sizeof(u64) and must fit in the u16-sized
 * sample field.  On success stores the result in *_size and returns 0;
 * otherwise prints an error and returns -1.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
	char *endptr;
	unsigned long size = strtoul(str, &endptr, 0);

	if (*endptr == '\0') {
		size = round_up(size, sizeof(u64));
		if (size && size <= max_size) {
			*_size = size;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
650

651
int record_parse_callchain(const char *arg, struct record_opts *opts)
652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
671
				opts->call_graph = CALLCHAIN_FP;
672 673 674 675 676 677
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

678
#ifdef HAVE_DWARF_UNWIND_SUPPORT
679 680
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
681 682
			const unsigned long default_stack_dump_size = 8192;

683
			ret = 0;
684 685
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;
686 687 688 689 690 691

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
692
				opts->stack_dump_size = size;
693
			}
694
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
695
		} else {
J
Jiri Olsa 已提交
696
			pr_err("callchain: Unknown --call-graph option "
697 698 699 700 701 702 703
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
J
Jiri Olsa 已提交
704 705 706
	return ret;
}

707
/* Dump the configured callchain collection method at debug verbosity. */
static void callchain_debug(struct record_opts *opts)
{
	/* Indexed by opts->call_graph (CALLCHAIN_NONE/FP/DWARF). */
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[opts->call_graph]);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
722
	struct record_opts *opts = opt->value;
J
Jiri Olsa 已提交
723 724
	int ret;

725 726
	opts->call_graph_enabled = !unset;

J
Jiri Olsa 已提交
727 728 729 730 731 732 733 734
	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
735
	if (!ret)
J
Jiri Olsa 已提交
736
		callchain_debug(opts);
737 738 739 740

	return ret;
}

J
Jiri Olsa 已提交
741 742 743 744
/*
 * Option callback for plain -g: enable call graphs, defaulting to
 * frame-pointer mode unless a mode was already configured.
 */
int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *ropts = opt->value;

	ropts->call_graph_enabled = !unset;

	if (ropts->call_graph == CALLCHAIN_NONE)
		ropts->call_graph = CALLCHAIN_FP;

	callchain_debug(ropts);
	return 0;
}

756 757 758 759 760 761 762 763 764 765
/* perf_config() callback: consume record.* variables, punt the rest. */
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (strcmp(var, "record.call-graph") != 0)
		return perf_default_config(var, value, cb);

	return record_parse_callchain(value, &rec->opts);
}

766
/* Synopsis lines shown by 'perf record -h' and on usage errors. */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

772
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};
794

J
Jiri Olsa 已提交
795
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

/* Advertise "dwarf" mode only when unwind support was built in. */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

803 804 805
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	/* -b and -j share a parser; -b takes no argument and means "any". */
	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

887
/*
 * Entry point for 'perf record': parse the command line, validate the
 * target, create the event/thread maps and hand off to __cmd_record().
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* Neither a workload nor a target to attach to: nothing to record. */
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* No -e given: fall back to the default event. */
	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}