/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

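/*
 * Drain one mmap'ed ring buffer into the output file. The kernel
 * advances 'head' as it produces events, while md->prev remembers how
 * far the previous drain got. The buffer size is a power of two
 * (md->mask == size - 1), so a region that wraps past the end has to
 * be written in two chunks: from 'old & md->mask' to the end of the
 * buffer, then from the start of the buffer up to 'head & md->mask'.
 * Writing the tail back tells the kernel the space may be reused.
 */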
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

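/*
 * atexit() handler: re-raise the fatal signal with its default
 * disposition, so that our parent sees the real termination status
 * (e.g. killed by SIGTERM) rather than a plain exit.
 */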
static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

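/*
 * Open a counter per event for every mapped cpu/thread. On failure,
 * perf_evsel__fallback() may rewrite the event to something the system
 * does support (e.g. a software fallback when no hardware PMU is
 * available), in which case the open is retried; otherwise the error
 * is reported and the whole session is aborted.
 */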
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

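/*
 * Re-read the data we just recorded and mark the DSOs that samples
 * actually hit, so that the build-id table written to the header only
 * covers objects relevant to this session.
 */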
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel when processing the record & report
	 * subcommands, we arrange module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
	 * first address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

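/*
 * Emitted after every per-cpu buffer has been drained once; the
 * session layer uses these markers as flush points when re-sorting
 * events by timestamp across the per-cpu streams.
 */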
static int record__mmap_read_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for that by setting its want_signal parameter to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

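	/*
	 * Main dispatch loop: drain every mmap buffer, and if no new
	 * samples arrived since the last pass, block in poll() until
	 * the kernel signals more data or the session is done.
	 */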
	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[512];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

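/*
 * Parse the -b/-j argument into a PERF_SAMPLE_BRANCH_* mask, e.g.
 * "-j any_call,u" samples any taken call branch in user space. If only
 * privilege-level modifiers (u/k/hv) are given, the branch type
 * defaults to "any".
 */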
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
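
/*
 * Parse the --call-graph mode: "fp" requests frame-pointer based
 * unwinding, while "dwarf[,size]" (only when built with dwarf unwind
 * support) captures a user stack dump of 'size' bytes (8192 by
 * default) to be unwound later, e.g. "--call-graph dwarf,4096".
 */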
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_DWARF_UNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct record_opts *opts)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[opts->call_graph]);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct record_opts *opts = opt->value;
	int ret;

	opts->call_graph_enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	opts->call_graph_enabled = !unset;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

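/*
 * Pick up defaults from perfconfig, so that e.g. a ~/.perfconfig with:
 *
 *	[record]
 *		call-graph = dwarf,4096
 *
 * makes dwarf callchain recording the default for perf record.
 */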
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.call-graph"))
		return record_parse_callchain(value, &rec->opts);

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
 * because we need to have access to it in record__exit, which is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

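/*
 * Typical invocations these options enable (a sketch, not exhaustive):
 *
 *	perf record -F 4000 -g -- ./workload	# ~4kHz samples with callchains
 *	perf record -a sleep 10			# whole system for 10 seconds
 *	perf record -p 1234 -o app.data		# existing pid, custom output file
 */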
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}
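
	/*
	 * Attaching to an existing thread with -t should not follow
	 * children unless inheritance was explicitly requested.
	 */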
	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}