/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

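/*
 * Fallback for C libraries that lack on_exit() (a GNU extension):
 * emulate it on top of atexit(), capturing the exit status through
 * an exit() wrapper macro.
 */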
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

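/* Per-invocation state of the record command. */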
struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

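/* Write a buffer to the perf.data fd in full, looping on short writes. */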
static int do_write_output(struct perf_record *rec, void *buf, size_t size)
{
	struct perf_data_file *file = &rec->file;

	while (size) {
		ssize_t ret = write(file->fd, buf, size);

		if (ret < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	return do_write_output(rec, buf, size);
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

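/*
 * Drain one mmap'ed ring buffer into the output file; a chunk that
 * wraps past the end of the buffer is written out in two pieces.
 */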
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

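/* Set asynchronously by the signal handler, polled by the main loop. */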
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

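/*
 * on_exit handler: reap a still-running forked workload, then restore
 * the default disposition of the terminating signal.
 */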
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

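/*
 * Open all events in the evlist on the configured CPUs/threads, apply
 * tracepoint filters and mmap the ring buffers.
 */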
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

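/*
 * Re-scan the recorded events so that only DSOs that were actually hit
 * get their build-ids emitted into the perf.data header.
 */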
static int process_buildids(struct perf_record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

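/*
 * on_exit handler: on a successful run, finalize the on-disk header
 * (data size, build-ids) and tear the session down.
 */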
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel when processing the record & report
	 * subcommands, we arrange the module mmaps prior to the guest
	 * kernel mmap and trigger a DSO preload, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
	 * first address falls in a module rather than in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

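/*
 * PERF_RECORD_FINISHED_ROUND marks a point by which every ring buffer
 * has been flushed once, bounding how far event reordering is needed.
 */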
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

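/*
 * Flush every mmap'ed ring buffer; when tracing data is being recorded,
 * append a finished-round marker afterwards.
 */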
static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

static void perf_record__init_features(struct perf_record *rec)
{
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

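/*
 * The actual recording: set up the session and output file, synthesize
 * the initial side-band events, then capture until done.
 */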
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	perf_record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

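	/*
	 * Main capture loop: drain all mmap ring buffers, then block in
	 * poll() until more data arrives or the workload finishes.
	 */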
	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}


#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

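/*
 * Parse a comma-separated list of branch filter names (-b/-j) into a
 * PERF_SAMPLE_BRANCH_* bitmask, defaulting to "any" when only privilege
 * levels were given.
 */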
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
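/*
 * Parse the user-supplied stack dump size for DWARF unwinding, rounding
 * it up to a multiple of u64 and capping it below USHRT_MAX.
 */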
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */

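/*
 * Parse the --call-graph mode string: "fp", or "dwarf[,<dump size>]"
 * when libunwind support is built in.
 */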
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};

#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "force-per-cpu", &record.opts.target.force_per_cpu,
		    "force the use of per-cpu mmaps"),
	OPT_END()
};

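/* Entry point for 'perf record': option parsing, setup and teardown. */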
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (perf_record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}