/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

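/*
 * Append @size bytes at @bf to the output file, accounting them in
 * rec->bytes_written, which later feeds the header's data_size field
 * and the approximate sample count printed on exit.
 */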
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

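/*
 * Drain one mmap'ed ring buffer into the output file. When the region
 * between the old tail and the new head wraps past the end of the
 * buffer, it is written out as two chunks; the tail is advanced only
 * after the data was written out.
 */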
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

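/*
 * Apply the record options to the evlist, open a counter for each
 * event (retrying with a fallback event when the requested one is not
 * supported), then apply event filters and mmap the ring buffers.
 */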
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

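/*
 * Re-read the events written so far (from data_offset up to the current
 * file offset) with build_id__mark_dso_hit_ops, marking the DSOs that
 * were hit so their build-ids can be written out with the header.
 */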
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel: when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest
	 * kernel mmap and trigger a dso preload, because by default
	 * guest module symbols are loaded from the guest kallsyms
	 * instead of from /lib/modules/XXX/XXX. This avoids missing
	 * symbols when the first address falls in a module instead of
	 * in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

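/*
 * Synthetic event emitted after each pass over all mmap buffers when
 * tracing data is present: it tells the code processing perf.data that
 * the events buffered so far may be sorted and flushed.
 */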
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

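/*
 * The record session proper: set up signals and the session, write the
 * file header, synthesize the pre-existing kernel/module/thread events,
 * start the workload if one was given, then drain the mmap buffers
 * until we are done or the child exits.
 */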
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

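	/*
	 * Main loop: drain all ring buffers, and block in poll() only
	 * when a full pass found no new samples.
	 */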
	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			/*
			 * Propagate the error only if there is one: a positive
			 * number of returned events and an interrupted poll()
			 * are not errors.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[512];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};
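
/*
 * Parse a comma-separated list of branch filter names from -b/-j into a
 * PERF_SAMPLE_BRANCH_* mask, e.g. "any_call,u" selects
 * PERF_SAMPLE_BRANCH_ANY_CALL | PERF_SAMPLE_BRANCH_USER. When only
 * privilege levels are given, the branch type defaults to "any".
 */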

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */

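/*
 * Parse the --call-graph argument: "fp" selects frame-pointer based
 * unwinding; with dwarf unwind support, "dwarf[,<size>]" selects dwarf
 * unwinding with a user stack dump of <size> bytes (8192 by default).
 */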
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_DWARF_UNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct record_opts *opts)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[opts->call_graph]);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct record_opts *opts = opt->value;
	int ret;

	opts->call_graph_enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	opts->call_graph_enabled = !unset;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

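/*
 * perf_config() callback: honour a "record.call-graph" entry in the
 * user's config file as the default --call-graph setting.
 */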
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.call-graph"))
		return record_parse_callchain(value, &rec->opts);

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

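/*
 * Entry point of 'perf record': parse the options, validate the target,
 * create the evlist and its maps, then hand off to __cmd_record().
 */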
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}