/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
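
/*
 * Illustrative invocations (editor's examples, assembled from the
 * record_options table at the bottom of this file):
 *
 *	perf record ./workload		# profile a freshly started command
 *	perf record -p 1234		# profile an existing pid
 *	perf record -a -g sleep 5	# system-wide, with call graphs
 */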
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

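/*
 * Fallback for C libraries that lack the (non-standard) on_exit():
 * emulate it on top of atexit(), recording the exit status in __exitcode
 * via the exit() wrapper macro below.
 */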
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
	off_t			post_processing_offset;
};

static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	struct perf_data_file *file = &rec->file;

	while (size) {
		int ret = write(file->fd, buf, size);

		if (ret < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

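/*
 * Drain one mmap'ed ring buffer into the output file. A chunk that wraps
 * past the end of the buffer is written out in two pieces, and the tail
 * pointer is only advanced after everything was written, so the kernel
 * cannot overwrite unread samples.
 */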
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

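/*
 * on_exit() handler: if a forked workload is still running, ask it to
 * terminate and reap it, then restore the default disposition of the
 * signal that ended the session (unless we exited normally or got
 * SIGUSR1).
 */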
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

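/*
 * Configure and open all counters in the evlist, falling back to older
 * event encodings when the kernel rejects an attribute, then apply the
 * event filters and mmap the per-cpu ring buffers.
 */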
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

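/*
 * Post-process the data written so far: walk the events recorded after
 * rec->post_processing_offset and mark the DSOs that actually got hits,
 * so only their build-ids end up in the header.
 */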
static int process_buildids(struct perf_record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

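/*
 * on_exit() handler for successful runs: account the streamed event data
 * in the header, collect build-ids unless disabled, and rewrite the
 * header with the final sizes. None of this is possible when writing to
 * a pipe.
 */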
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload dso, because default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address falls in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

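/*
 * PERF_RECORD_FINISHED_ROUND marks a point where every ring buffer was
 * drained once; 'perf report' uses these markers to bound how many
 * events it must buffer before re-sorting them by timestamp.
 */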
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

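/*
 * Start with every header feature enabled, then clear the ones this
 * session will not provide: build-ids when disabled, tracing data when
 * there are no tracepoints, branch stacks when they were not requested.
 */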
static void perf_record__init_features(struct perf_record *rec)
{
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

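/*
 * The record session proper: fork the workload (if any), open the
 * counters, write the header, synthesize events for what was already
 * running, then loop draining the ring buffers until done.
 */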
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	perf_record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	rec->post_processing_offset = lseek(file->fd, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really should return
			 * this more properly and also propagate errors
			 * that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

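/*
 * Token table for -b/-j: parse_branch_stack() below maps a comma-separated
 * list such as "any_call,u,k" onto the PERF_SAMPLE_BRANCH_* bits and
 * defaults to "any" when only privilege levels were requested.
 */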
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

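/*
 * Parse the dump_size argument of --call-graph dwarf,<size>: the size is
 * rounded up to a multiple of u64 and rejected if it is zero or exceeds
 * USHRT_MAX rounded down to that alignment.
 */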
#ifdef HAVE_LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */

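/*
 * Parse the --call-graph value: "fp" selects frame-pointer chains;
 * "dwarf[,size]" (only with libunwind support) records user stack dumps
 * of the given size, 8192 bytes by default, for DWARF post-unwinding.
 */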
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a
 * perf_record__new, because we need access to it in perf_record__exit,
 * which is called after cmd_record() exits, but since record_options
 * needs to be accessible to builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};

#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable until we fix builtin-script.c to stop
 * messing with it and switch to using the library functions in perf_evlist
 * that came from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record'
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_END()
};

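/*
 * Entry point of 'perf record': parse the options, validate the target,
 * create the cpu/thread maps and hand control to __cmd_record().
 */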
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (perf_record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}