/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

#ifndef HAVE_ON_EXIT_SUPPORT
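/*
 * Fallback for C libraries that lack on_exit() (a GNU/Solaris extension,
 * missing e.g. from Android's Bionic): emulate it on top of atexit() by
 * keeping a private table of handlers and wrapping exit() in a macro so
 * the exit status can be captured and passed to each handler.
 */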
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

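/*
 * Per-invocation state of the record command: the perf_tool callbacks,
 * recording options, output file, event list and session, plus counters
 * used for the end-of-run summary.
 */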
struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
	off_t			post_processing_offset;
};

static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

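/*
 * write() may store fewer bytes than requested, so loop until the whole
 * buffer has been flushed to the output file, accounting every byte in
 * rec->bytes_written.
 */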
static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	struct perf_data_file *file = &rec->file;

	while (size) {
		int ret = write(file->fd, buf, size);

		if (ret < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);

	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

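/*
 * Drain one mmap'ed ring buffer: copy everything between our last
 * position (md->prev) and the kernel's current head. When the data wraps
 * around the end of the buffer the copy is split in two, after which the
 * new tail is published so the kernel can reuse the space.
 */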
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

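/*
 * on_exit() handler: terminate and reap a still-running forked workload,
 * then restore the default disposition of the signal that ended the run
 * (unless we exited normally or via SIGUSR1).
 */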
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

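/*
 * Configure and open all counters in the event list, letting
 * perf_evsel__fallback() retry with a tweaked configuration when the
 * kernel rejects an event, then apply event filters and mmap the
 * per-cpu ring buffers.
 */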
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

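/*
 * Re-read the events recorded so far and run them through
 * build_id__mark_dso_hit_ops, so that only DSOs actually hit by samples
 * get their build-ids written into the perf.data header.
 */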
static int process_buildids(struct perf_record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

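/*
 * Callback for machines__process_guests(): synthesize module and kernel
 * mmap events for each guest machine so guest samples can be resolved.
 */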
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * For guest kernels, when handling the record & report subcommands,
	 * synthesize the module mmaps before the guest kernel mmap and
	 * trigger a DSO preload, because guest module symbols are loaded
	 * from the guest's kallsyms instead of /lib/modules/XXX/XXX by
	 * default. This avoids missing symbols when the first sampled
	 * address falls in a module rather than in the guest kernel proper.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernels because a guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

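/*
 * Flush marker written after each pass over the ring buffers; the report
 * side can sort and process everything before a FINISHED_ROUND event as a
 * complete batch instead of buffering the whole file.
 */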
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

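/*
 * Heart of the record command: install signal/exit handlers, write the
 * file or pipe header, synthesize the metadata events (attrs, tracing
 * data, kernel and module mmaps, threads), kick off the workload and then
 * drain the ring buffers until the workload ends or we are interrupted.
 */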
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	int err, feat;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	rec->post_processing_offset = lseek(file->fd, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

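/*
 * Parse the comma-separated branch filter list given to -b/-j, OR-ing the
 * selected modes into opts->branch_stack. If only privilege-level bits
 * were requested, fall back to sampling any branch type.
 */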
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

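/*
 * Parse and validate the user stack dump size used for DWARF unwinding:
 * the value is rounded up to u64 granularity and capped just below
 * USHRT_MAX.
 */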
#ifdef HAVE_LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */

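/*
 * Parse the --call-graph argument: "fp" selects frame-pointer based
 * callchains; with libunwind support, "dwarf[,size]" selects user-stack
 * dumps unwound via DWARF info, with an optional dump size (default
 * 8192 bytes).
 */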
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a
 * perf_record__new(), because we need access to it in perf_record__exit(),
 * which is called after cmd_record() exits; but since record_options needs
 * to be accessible to builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};

#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_END()
};

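/*
 * Entry point for 'perf record': parse the command line, validate the
 * target (pid/tid/cpu/uid), turn the user's -c/-F choices into the
 * effective period/frequency, and hand off to __cmd_record().
 */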
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}