/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
8
#include "builtin.h"
9 10 11

#include "perf.h"

12
#include "util/build-id.h"
13
#include "util/util.h"
14
#include "util/parse-options.h"
15
#include "util/parse-events.h"
16

17
#include "util/header.h"
18
#include "util/event.h"
19
#include "util/evlist.h"
20
#include "util/evsel.h"
21
#include "util/debug.h"
22
#include "util/session.h"
23
#include "util/tool.h"
24
#include "util/symbol.h"
25
#include "util/cpumap.h"
26
#include "util/thread_map.h"
27
#include "util/data.h"
28

29
#include <unistd.h>
30
#include <sched.h>
31
#include <sys/mman.h>
32

33
#ifndef HAVE_ON_EXIT_SUPPORT
/*
 * Minimal fallback for the GNU on_exit(3) extension on libcs that lack
 * it: a fixed-size table of (handler, arg) pairs run from a single
 * atexit() hook.  The exit() wrapper macro below records the exit
 * status into __exitcode so the handlers can receive it.
 *
 * NOTE(review): handlers run here in registration (FIFO) order, whereas
 * glibc's on_exit() runs them in reverse (LIFO) order -- confirm no
 * pair of registered handlers depends on the ordering.
 */
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
/* Route every exit(x) through the real exit() while recording x. */
#define exit(x) (exit)(__exitcode = (x))

/* Register a handler; fails with -ENOMEM once the table is full. */
static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);	/* install hook lazily, once */
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

/* atexit() hook: invoke all registered handlers with the saved status. */
static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

65
/*
 * State for one 'perf record' run.  The embedded perf_tool comes first
 * so callbacks can recover the containing struct via container_of().
 */
struct perf_record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct perf_record_opts	opts;		/* parsed recording options */
	u64			bytes_written;	/* payload bytes written so far */
	struct perf_data_file	file;		/* perf.data output file */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;	/* session owning the output header */
	const char		*progname;	/* argv[0], for psignal() reporting */
	int			realtime_prio;	/* nonzero: SCHED_FIFO priority to use */
	bool			no_buildid;	/* -B: don't collect buildids in perf.data */
	bool			no_buildid_cache; /* -N: don't update the buildid cache */
	long			samples;	/* count of non-empty ring-buffer drains */
};
78

79 80
static ssize_t perf_record__write(struct perf_record *rec,
				  void *buf, size_t size)
81
{
82 83
	struct perf_session *session = rec->session;
	ssize_t ret;
84

85 86 87 88
	ret = perf_data_file__write(session->file, buf, size);
	if (ret < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
89
	}
90

91
	rec->bytes_written += ret;
92
	return 0;
93 94
}

95
/*
 * perf_tool callback: write one synthesized event straight to the
 * output file; sample/machine context is not needed here.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec;

	rec = container_of(tool, struct perf_record, tool);
	return perf_record__write(rec, event, event->header.size);
}

104
/*
 * Drain one mmap'ed ring buffer into the output file.
 *
 * Copies everything between the consumer position (md->prev) and the
 * kernel's producer head, splitting the copy in two when the data wraps
 * around the end of the ring, then advances the tail pointer so the
 * kernel can reuse the space.  Returns 0 on success, -1 on write error.
 */
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* the data area starts one page past the control page */
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;	/* nothing new to consume */

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		/* wrapped: first write the chunk up to the end of the ring */
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (perf_record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* write the (remaining) chunk, starting at the ring base if wrapped */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (perf_record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	/* only move the tail once everything was written out successfully */
	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

/* Set from the signal handler: tells the main loop to wind down. */
static volatile int done = 0;
/* Last signal received, or -1 when exiting without one. */
static volatile int signr = -1;
/* SIGCHLD seen: the forked workload has already exited. */
static volatile int child_finished = 0;

152
static void sig_handler(int sig)
153
{
154 155 156
	if (sig == SIGCHLD)
		child_finished = 1;

157
	done = 1;
158 159 160
	signr = sig;
}

161
/*
 * on_exit() handler: reap the forked workload (SIGTERMing it first if
 * it has not already exited) and, when we are exiting because of a
 * signal other than SIGUSR1, restore that signal's default disposition.
 */
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			/* tell the user which signal killed the workload */
			psignal(WTERMSIG(status), rec->progname);
	}

	/* normal exit, or SIGUSR1 (treated as a clean stop): nothing to undo */
	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

181
/*
 * Configure, open and mmap all events on the evlist.
 *
 * Each event is opened with perf_evsel__open(); on failure we first try
 * perf_evsel__fallback() (which may tweak the event config and ask for a
 * retry) before giving up with a user-facing error.  Then filters are
 * applied and the ring buffers mmap'ed.  Returns 0 on success, negative
 * errno-style value or -1 on failure.
 */
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			/* fallback may adjust the event and request a retry */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			/* mlock limit exceeded: tell the user how to fix it */
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

237
static int process_buildids(struct perf_record *rec)
238
{
239 240
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
241
	u64 start = session->header.data_offset;
242

243
	u64 size = lseek(file->fd, 0, SEEK_CUR);
244 245 246
	if (size == 0)
		return 0;

247 248
	return __perf_session__process_events(session, start,
					      size - start,
249 250 251
					      size, &build_id__mark_dso_hit_ops);
}

252
/*
 * on_exit() handler for the success path: finalize the on-disk header
 * (data size, buildids) and tear down the session/evlist.  Skipped
 * entirely on non-zero exit status and for pipe output, which has no
 * seekable header to rewrite.
 */
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		/* at_exit=true: rewrite the header in place */
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

273
/*
 * machines__process_guests() callback: synthesize module and kernel
 * mmap events for one guest machine; @data is the perf_tool.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

305 306 307 308 309
/*
 * Header-only marker event written by perf_record__mmap_read_all()
 * after draining every ring buffer (when tracing data is recorded).
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

310
static int perf_record__mmap_read_all(struct perf_record *rec)
311
{
312
	int i;
313
	int rc = 0;
314

315
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
316 317 318 319 320 321
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
322 323
	}

324
	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
325 326
		rc = perf_record__write(rec, &finished_round_event,
					sizeof(finished_round_event));
327 328 329

out:
	return rc;
330 331
}

332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
/*
 * Start with every header feature enabled, then clear the ones that do
 * not apply to this session (buildids when disabled, tracing data when
 * no tracepoints are recorded, branch stacks when not requested).
 */
static void perf_record__init_features(struct perf_record *rec)
{
	struct perf_session *session = rec->session;
	struct perf_evlist *evlist = rec->evlist;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

351
/*
 * Main recording loop: install signal/exit handlers, create the session,
 * optionally fork the workload, open+mmap the events, write the file
 * header, synthesize the bootstrap events (attrs, tracing data, kernel
 * and module mmaps, existing threads), then drain the ring buffers until
 * told to stop.
 *
 * Returns 0 on success.  On the error paths the session is deleted here;
 * on the success path final header fixup and teardown happen in
 * perf_record__exit(), registered via on_exit().
 */
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;	/* remaining argv is the workload */
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	perf_record__init_features(rec);

	if (forks) {
		/* fork the workload now; it is started only further below */
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		/* placeholder header; rewritten at exit with final sizes */
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		/* pipe output carries attrs in-band instead of in a header */
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			rec->bytes_written += err;
		}
	}

	/* kernel mmap: prefer _text, fall back to _stext (see guest variant) */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	/* synthesize mmap/comm events for already-running target threads */
	err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			/* nothing was read this pass: stop, or sleep for data */
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	/* on_exit() handlers still run after these returns */
	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
551

552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569
/* Table mapping -j/--branch-filter keywords to PERF_SAMPLE_BRANCH_* bits. */
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;	/* keyword accepted on the command line */
	int mode;		/* corresponding PERF_SAMPLE_BRANCH_* flag */
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

/*
 * Option callback for -b/--branch-any and -j/--branch-filter: parse a
 * comma-separated list of branch_modes keywords into the sample_branch
 * bitmask at opt->value.  If only privilege-level bits (u/k/hv) end up
 * set, "any" branch sampling is implied.  Returns 0 on success, -1 on
 * an unknown keyword or when the mask was already set.
 */
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			/* split on ',' in place, one token per iteration */
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

639
#ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Parse a user-supplied stack dump size for dwarf callchains: accept
 * any strtoul() base-0 number, round it up to a u64 multiple, and
 * reject 0 or anything beyond USHRT_MAX rounded down to a u64
 * multiple.  On success stores the result in *_size and returns 0;
 * otherwise prints an error and returns -1.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
	unsigned long size;
	char *endptr;

	size = strtoul(str, &endptr, 0);

	if (*endptr == '\0') {
		size = round_up(size, sizeof(u64));
		if (size != 0 && size <= max_size) {
			*_size = size;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */
666

J
Jiri Olsa 已提交
667
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
687
				opts->call_graph = CALLCHAIN_FP;
688 689 690 691 692 693
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

694
#ifdef HAVE_LIBUNWIND_SUPPORT
695 696
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
697 698
			const unsigned long default_stack_dump_size = 8192;

699
			ret = 0;
700 701
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;
702 703 704 705 706 707

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
708
				opts->stack_dump_size = size;
709
			}
710
#endif /* HAVE_LIBUNWIND_SUPPORT */
711
		} else {
J
Jiri Olsa 已提交
712
			pr_err("callchain: Unknown --call-graph option "
713 714 715 716 717 718 719
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
J
Jiri Olsa 已提交
720 721 722 723 724 725
	return ret;
}

/* Dump the effective callchain configuration at debug verbosity. */
static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

/*
 * --call-graph / --no-call-graph option callback: delegate parsing to
 * record_parse_callchain() and report the result at debug verbosity.
 */
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int err;

	if (unset) {
		/* --no-call-graph: turn callchain recording off entirely */
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	err = record_parse_callchain(arg, opts);
	if (err)
		return err;

	callchain_debug(opts);
	return 0;
}

J
Jiri Olsa 已提交
753 754 755 756 757 758 759 760 761 762 763 764 765
/*
 * Bare '-g' option callback (takes no argument): if callchains are
 * currently off, fall back to framepointer mode, then report the
 * resulting configuration.
 */
int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

766
/* Usage strings shown by usage_with_options()/parse_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		/*
		 * UINT_MAX/ULLONG_MAX look like "not set by the user"
		 * sentinels -- presumably resolved later by
		 * perf_record_opts__config(); confirm there.
		 */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};
794

J
Jiri Olsa 已提交
795
/* Help text for --call-graph; "dwarf" mode needs libunwind support. */
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

803 804 805 806 807 808 809
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	/* event selection and filtering */
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	/* target selection: pid/tid/cpu/system-wide/uid */
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	/* sampling parameters */
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	/* callchain recording: bare -g vs --call-graph with an argument */
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	/* branch stack sampling: -b is "any"; -j takes an explicit filter */
	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

885
/*
 * Entry point for 'perf record': parse options, validate the target,
 * create the event/thread/cpu maps and hand off to __cmd_record().
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a workload to run or an existing target to attach to */
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* no -e given: fall back to the default event */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	/* target problems here are only warnings, not fatal */
	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (perf_record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}