builtin-record.c 26.0 KB
Newer Older
I
Ingo Molnar 已提交
1
/*
2 3 4 5 6
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
I
Ingo Molnar 已提交
7
 */
8
#include "builtin.h"
9 10 11

#include "perf.h"

12
#include "util/build-id.h"
13
#include "util/util.h"
14
#include "util/parse-options.h"
15
#include "util/parse-events.h"
16

17
#include "util/callchain.h"
18
#include "util/cgroup.h"
19
#include "util/header.h"
20
#include "util/event.h"
21
#include "util/evlist.h"
22
#include "util/evsel.h"
23
#include "util/debug.h"
24
#include "util/session.h"
25
#include "util/tool.h"
26
#include "util/symbol.h"
27
#include "util/cpumap.h"
28
#include "util/thread_map.h"
29
#include "util/data.h"
30

31
#include <unistd.h>
32
#include <sched.h>
33
#include <sys/mman.h>
34

35

36
/*
 * State for one 'perf record' run: the perf_tool callbacks, the parsed
 * command-line options, the output file and the evlist/session being
 * recorded.
 */
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;	/* payload bytes flushed by record__write() */
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;	/* non-zero: run with SCHED_FIFO at this prio */
	bool			no_buildid;	/* -B: don't collect build-ids in perf.data */
	bool			no_buildid_cache; /* -N: don't update the build-id cache */
	long			samples;	/* mmap reads seen; reset before process_buildids() */
};
49

50
/*
 * record__write - append @size raw bytes at @bf to the output file.
 *
 * On success the bytes are accounted in rec->bytes_written and 0 is
 * returned; on a failed write an error is printed and -1 is returned.
 */
static int record__write(struct record *rec, void *bf, size_t size)
{
	int ret = 0;

	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		ret = -1;
	} else {
		rec->bytes_written += size;
	}

	return ret;
}

61
/*
 * Tool callback used while synthesizing events (mmaps, comms, attrs...):
 * funnels the synthesized record straight into the output file.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	size_t len = event->header.size;

	return record__write(rec, event, len);
}

70
/*
 * record__mmap_read - drain one mmap'ed ring buffer into the output file.
 *
 * Copies everything between the last consumed position (md->prev) and the
 * kernel's current head.  If the span wraps around the end of the ring,
 * it is written in two chunks.  Returns 0 on success (including nothing
 * to do), -1 if a write failed.
 */
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* data follows the perf_event_mmap_page header page */
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	/* Does [old, head) wrap past the end of the ring buffer? */
	if ((old & md->mask) + size != (head & md->mask)) {
		/* first chunk: from old up to the end of the buffer */
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* remaining (or only) chunk: from the (possibly wrapped) old to head */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	/*
	 * Only advance the consumed position after a successful write, so a
	 * failed flush can be retried without losing events.
	 */
	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

/* Set once a terminating signal (or SIGCHLD) arrived; main loop polls it. */
static volatile int done = 0;
/* Fatal signal deferred for re-raise in record__sig_exit(); -1 if none. */
static volatile int signr = -1;
/* Set when the forked workload has exited (SIGCHLD observed). */
static volatile int child_finished = 0;

/*
 * Common handler for SIGCHLD/SIGINT/SIGTERM: record what happened and
 * ask the main loop to wind down.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

127
/*
 * atexit() hook: if a fatal signal was deferred by sig_handler(), restore
 * the default disposition and re-raise it so the process terminates with
 * the correct signal status after cleanup has run.
 */
static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

136
/*
 * record__open - configure and open all counters, apply event filters and
 * mmap the ring buffers.
 *
 * Returns 0 on success; on failure a negative errno (open/mmap errors)
 * or -1 (filter errors) after printing a diagnostic.
 */
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			/*
			 * Fallback may downgrade the event config (based on
			 * errno) to something the kernel supports; if it did,
			 * retry the open with the adjusted attributes.
			 */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			/* capture errno before further calls can clobber it */
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	/* On failure, 'pos' points at the evsel whose filter was rejected. */
	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			/* EPERM here is usually the mlock limit, not access rights */
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	/* id header size depends on the sample_type just configured */
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

194 195 196 197 198 199 200 201 202 203 204 205 206
/*
 * Sample callback used while re-reading perf.data for build-ids: counts
 * the sample and lets the build-id machinery mark the DSO as hit.
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples += 1;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

207
static int process_buildids(struct record *rec)
208
{
209 210
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
211

212
	u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
213 214 215
	if (size == 0)
		return 0;

216 217
	file->size = size;

218 219 220 221 222 223 224 225 226 227 228
	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

229
	return perf_session__process_events(session);
230 231
}

232
/*
 * Per-guest callback for machines__process_guests(): synthesize module and
 * kernel mmap events for one guest machine.  Errors are reported but not
 * fatal — recording continues with possibly degraded guest symbolization.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

261 262 263 264 265
/*
 * Header-only marker written after each full mmap sweep that produced
 * data, so the report side can process events in bounded "rounds".
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

266
static int record__mmap_read_all(struct record *rec)
267
{
268
	u64 bytes_written = rec->bytes_written;
269
	int i;
270
	int rc = 0;
271

272
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
273
		if (rec->evlist->mmap[i].base) {
274
			if (record__mmap_read(rec, i) != 0) {
275 276 277 278
				rc = -1;
				goto out;
			}
		}
279 280
	}

281 282 283 284 285 286
	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
287 288 289

out:
	return rc;
290 291
}

292
/*
 * Initialize the perf.data header feature bits: start with everything
 * enabled, then clear the features that don't apply to this run.
 */
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	/* tracing data only makes sense when tracepoint events are recorded */
	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

310 311 312 313 314 315 316
/* errno of a failed exec in the forked workload; 0 when the exec succeeded. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* the failing errno is carried in the siginfo value payload */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

326
/*
 * __cmd_record - the main recording loop.
 *
 * Sets up the session and output header, optionally forks the workload,
 * opens the counters, synthesizes the pre-existing state (kernel mmap,
 * modules, threads), then loops draining the mmap buffers until a signal
 * arrives or the workload exits.  Finally reaps the child, runs the
 * build-id pass and rewrites the header with final sizes.
 *
 * Returns 0 on success, a negative error otherwise (or the workload's
 * exit status when forking).
 */
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;		/* times poll() woke us, for the summary */
	const bool forks = argc > 0;		/* non-option args => workload to fork */
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	/*
	 * Fork the workload first (stopped until start_workload()) so its
	 * pid exists when the counters are opened below.
	 */
	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/* pipe output gets a minimal header; files get the full one */
	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	/*
	 * For pipe output the reader can't seek back for attrs/tracing data,
	 * so synthesize them inline into the stream now.
	 */
	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	/* synthesize mmap/comm events for the already-running target threads */
	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		/* no new data this pass: block in poll or finish up */
		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			/* all fds hung up: drain what's left and stop */
			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		/* prefer our own error; otherwise report the child's status */
		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		/* rewrite the header now that the data size is final */
		perf_session__write_header(rec->session, rec->evlist, fd, true);
	}

	if (!err && !quiet) {
		char samples[128];

		if (rec->samples)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}
571

572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589
/* Table entry mapping a -j/--branch-filter token to its sample flag(s). */
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;	/* token accepted on the command line */
	int mode;		/* PERF_SAMPLE_BRANCH_* bit(s) it enables */
};

/* All recognized branch-stack filter tokens, terminated by BRANCH_END. */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

/*
 * parse_branch_stack - option callback for -b / -j.
 *
 * Parses a comma-separated list of branch filter tokens (see
 * branch_modes[]) and ORs the corresponding PERF_SAMPLE_BRANCH_* bits
 * into the target mask.  If only privilege-level bits (or nothing) were
 * given, defaults the filter to "any" branch.  Returns 0 on success,
 * -1 on an unknown token, OOM, or if the mask was already set.
 */
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			/* split on commas in place */
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

660
/* Log (at debug verbosity) which call-graph recording mode is active. */
static void callchain_debug(void)
{
	/* indexed by enum values CALLCHAIN_NONE..CALLCHAIN_LBR */
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	/* only DWARF mode has a configurable stack dump size */
	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

671
/*
 * Option callback for --call-graph / --no-call-graph: parse the recording
 * mode (and optional dump size) or disable call-graph recording entirely.
 * Returns 0 on success, the parser's error otherwise.
 */
int record_parse_callchain_opt(const struct option *opt __maybe_unused,
			       const char *arg,
			       int unset)
{
	int ret = 0;

	callchain_param.enabled = !unset;

	if (unset) {
		/* --no-call-graph */
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
	} else {
		ret = parse_callchain_record_opt(arg);
		if (ret == 0)
			callchain_debug();
	}

	return ret;
}

693
/*
 * Option callback for bare -g: enable call-graph recording, defaulting to
 * frame-pointer mode if no mode was selected yet.  Always succeeds.
 */
int record_callchain_opt(const struct option *opt __maybe_unused,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

706 707 708
/*
 * perfconfig callback: "record.call-graph" is an alias for
 * "call-graph.record-mode"; everything else goes to the default handler.
 */
static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (strcmp(var, "record.call-graph") == 0)
		var = "call-graph.record-mode";

	return perf_default_config(var, value, cb);
}

714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797
struct clockid_map {
	const char *name;	/* user-visible clockid name */
	int clockid;		/* value handed to the kernel */
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

/*
 * parse_clockid - option callback for -k/--clockid.
 *
 * Accepts either a raw numeric clockid or a case-insensitive name from
 * clockids[], optionally prefixed with "CLOCK_".  Returns 0 on success,
 * -1 on an unknown/malformed clockid or if one was already set.
 */
static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;
	char *end;
	long cid;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/*
	 * If it's a number, we're done.  Unlike the previous
	 * sscanf(str, "%d", ...), strtol() with an end-pointer check
	 * rejects trailing garbage ("4abc" was silently accepted as 4);
	 * such strings now fall through to the name lookup and fail.
	 */
	cid = strtol(str, &end, 10);
	if (end != str && *end == '\0') {
		opts->clockid = cid;
		return 0;
	}

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

798
/* Usage strings shown by parse_options()/usage_with_options(). */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

805
/*
806 807
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
808 809 810 811 812 813 814
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
815
/* Default 'perf record' configuration, overridden by command-line options. */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		/* UINT_MAX/ULLONG_MAX mean "not set by the user" */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
	/* callbacks used when re-reading perf.data for the build-id pass */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
	},
};
835

J
Jiri Olsa 已提交
836
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

/* advertise "dwarf" only when DWARF unwind support was compiled in */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf lbr";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp lbr";
#endif

844 845 846
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
847
 * from builtin-record.c, i.e. use record_opts,
848 849 850
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
851
/* The full 'perf record' option table; exported via record_options. */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_BOOLEAN('I', "intr-regs", &record.opts.sample_intr_regs,
		    "Sample machine registers on interrupt"),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_END()
};

935 936
struct option *record_options = __record_options;

937
/*
 * cmd_record - entry point for 'perf record'.
 *
 * Parses options and config, validates the target, builds the evsel list
 * and cpu/thread maps, then hands off to __cmd_record().  Returns 0 on
 * success or a negative error.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a workload to fork or an existing target to attach to */
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* no -e given: fall back to the default event */
	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		/* validation problems are only warnings; recording proceeds */
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}