/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

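/*
 * Fallback on_exit()/exit() emulation for C libraries that lack
 * on_exit() (Android's bionic, for instance): handlers and the exit
 * code are recorded here and replayed from a single atexit() hook.
 */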
#ifndef HAVE_ON_EXIT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

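/*
 * All the state for one 'perf record' session.  Callbacks that only
 * receive a struct perf_tool recover it via container_of(), see
 * process_synthesized_event() below.
 */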
struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts	opts;
	u64			bytes_written;
	const char		*output_name;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			output;
	unsigned int		page_size;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
	off_t			post_processing_offset;
};

static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

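/*
 * Flush a buffer to the output file, looping so that short write(2)s
 * never truncate the event stream.
 */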
static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	while (size) {
		int ret = write(rec->output, buf, size);

		if (ret < 0) {
			pr_err("failed to write\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

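/*
 * Drain one mmap'ed ring buffer into the output file.  The new data may
 * wrap around the end of the buffer, in which case it is written out in
 * two chunks; the tail pointer is advanced only after everything has
 * been flushed, so the kernel can reuse the space.
 */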
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + rec->page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

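/*
 * on_exit() handler: reap a forked workload (terminating it first if it
 * is still running), then restore the default disposition of whatever
 * signal stopped us, unless we are exiting normally or via SIGUSR1.
 */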
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

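/*
 * Open a counter for every evsel/CPU/thread combination, retrying with
 * whatever fallback perf_evsel__fallback() suggests, then apply the
 * event filters and mmap the ring buffers.
 */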
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else if (!is_power_of_2(opts->mmap_pages) &&
			   (opts->mmap_pages != UINT_MAX)) {
			pr_err("--mmap_pages/-m value must be a power of two.\n");
			rc = -EINVAL;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

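/*
 * Second pass over the recorded stream so that only DSOs which actually
 * got hits have their build-ids emitted into the perf.data header.
 */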
static int process_buildids(struct perf_record *rec)
{
	u64 size = lseek(rec->output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;

	if (status != 0)
		return;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record and report
	 * subcommands we arrange the module mmaps before the guest kernel
	 * mmap and trigger a DSO preload, because by default guest module
	 * symbols are loaded from the guest kallsyms instead of from
	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the
	 * first sampled address falls in a module rather than in the
	 * guest kernel itself.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

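/*
 * Marker written after each full pass over the mmap buffers; the report
 * side uses it to bound how long events must be buffered for timestamp
 * reordering.
 */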
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

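/*
 * The record session proper: set up the output file, session header and
 * signal/exit handlers, synthesize the metadata events, then drain the
 * mmap buffers until the workload exits or we are interrupted.
 */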
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, output, feat;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	rec->page_size = sysconf(_SC_PAGE_SIZE);

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			opts->pipe_output = true;
		else
			rec->output_name = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			opts->pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	}

	flags = O_CREAT|O_RDWR|O_TRUNC;

	if (opts->pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		return -1;
	}

	rec->output = output;

	session = perf_session__new(output_name, O_WRONLY,
				    true, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, opts->pipe_output,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (opts->pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (opts->pipe_output) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

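/*
 * Table mapping the -b/-j branch filter names on the command line to
 * the PERF_SAMPLE_BRANCH_* bits passed to the kernel.
 */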
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* LIBUNWIND_SUPPORT */

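/*
 * Parse the --call-graph argument: "fp" selects frame-pointer based
 * unwinding; with libunwind support, "dwarf[,<dump size>]" selects DWARF
 * post-unwinding from a copied chunk of the user stack.
 */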
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a
 * perf_record__new(), because we need access to it in perf_record__exit(),
 * which is called after cmd_record() exits; but since record_options needs
 * to be accessible to builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};

#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace) recording: "

#ifdef LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_END()
};

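/*
 * Entry point of 'perf record': parse the options, validate the target,
 * create the event/CPU/thread maps and hand control to __cmd_record().
 */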
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}