/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>


struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

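/*
 * Drain one mmap'ed ring buffer into the perf.data file. When the chunk
 * between the old tail and the new head wraps past the end of the
 * buffer, it is flushed as two writes: first the piece up to the end of
 * the buffer, then the remainder from the start.
 */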
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

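/*
 * Flags flipped from the signal handlers below, hence volatile: the
 * main loop in __cmd_record() checks them to know when to stop.
 */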
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

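/*
 * Open a counter for each evsel on the configured cpus/threads, falling
 * back to a more generic event when the requested one is unavailable,
 * then apply the event filters and mmap the ring buffers.
 */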
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

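/*
 * Re-read the events written so far and mark the DSOs that got hits,
 * so that their build-ids can be stored in the perf.data header.
 */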
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record and report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a dso preload, because default guest module
	 * symbols are loaded from the guest kallsyms instead of from
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

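/*
 * Synthetic marker written after each full pass over the mmap buffers
 * (see record__mmap_read_all()), so that report can flush and sort the
 * events accumulated in that round.
 */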
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

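/*
 * The main record loop: create the session, write the header, synthesize
 * the pre-existing kernel/module/thread events, enable the counters and
 * drain the mmap buffers until the workload ends or a signal arrives.
 */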
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			if (err < 0 && errno == EINTR)
				err = 0;
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[512];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

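/*
 * Branch stack filter names accepted by -b/-j, mapped to their
 * PERF_SAMPLE_BRANCH_* sampling bits.
 */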
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

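/*
 * Parse the <size> part of --call-graph dwarf,<size>: rounded up to a
 * multiple of u64, and rejected when zero or above USHRT_MAX.
 */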
#ifdef HAVE_DWARF_UNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */

int record_parse_callchain(const char *arg, struct record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_DWARF_UNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct record_opts *opts)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[opts->call_graph]);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct record_opts *opts = opt->value;
	int ret;

	opts->call_graph_enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	opts->call_graph_enabled = !unset;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

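/* perfconfig hook: 'record.call-graph' takes the same values as --call-graph. */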
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.call-graph"))
		return record_parse_callchain(value, &rec->opts);

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};
877
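/*
 * Entry point for 'perf record': parse the options, validate the target,
 * create the cpu/thread maps and hand off to __cmd_record().
 */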
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
878
{
879
	int err = -ENOMEM;
880
	struct record *rec = &record;
881
	char errbuf[BUFSIZ];
882

883 884
	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
885 886
		return -ENOMEM;

887 888
	perf_config(perf_record_config, rec);

889
	argc = parse_options(argc, argv, record_options, record_usage,
890
			    PARSE_OPT_STOP_AT_NON_OPTION);
891
	if (!argc && target__none(&rec->opts.target))
892
		usage_with_options(record_usage, record_options);
893

894
	if (nr_cgroups && !rec->opts.target.system_wide) {
895 896
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
S
Stephane Eranian 已提交
897 898 899
		usage_with_options(record_usage, record_options);
	}

900
	symbol__init();
901

902
	if (symbol_conf.kptr_restrict)
903 904 905 906 907 908 909 910
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
911

912
	if (rec->no_buildid_cache || rec->no_buildid)
913
		disable_buildid_cache();
914

915 916
	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
917 918
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
919
	}
920

921 922 923
	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

924
	err = target__validate(&rec->opts.target);
925
	if (err) {
926
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
927 928 929
		ui__warning("%s", errbuf);
	}

930
	err = target__parse_uid(&rec->opts.target);
931 932
	if (err) {
		int saved_errno = errno;
933

934
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
935
		ui__error("%s", errbuf);
936 937

		err = -saved_errno;
938
		goto out_symbol_exit;
939
	}
940

941
	err = -ENOMEM;
942
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
943
		usage_with_options(record_usage, record_options);
944

945
	if (record_opts__config(&rec->opts)) {
946
		err = -EINVAL;
947
		goto out_symbol_exit;
948 949
	}

950
	err = __cmd_record(&record, argc, argv);
951
out_symbol_exit:
952
	perf_evlist__delete(rec->evlist);
953
	symbol__exit();
954
	return err;
955
}