/*
2 3 4 5 6
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
I
Ingo Molnar 已提交
7
 */
8
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <errno.h>
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
33
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
/*
 * Minimal fallback for the GNU on_exit(3) API on libcs that lack it:
 * registered handlers receive the process exit status plus a
 * caller-supplied argument.  exit() is wrapped below so the status
 * passed to it is captured in __exitcode before the atexit chain runs.
 */
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
/* Capture the status argument, then invoke the real exit(). */
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);	/* hook into atexit once */
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	/*
	 * NOTE(review): handlers run in registration order here, whereas
	 * glibc on_exit() runs them in reverse order -- the two users in
	 * this file appear order-insensitive, but confirm before reuse.
	 */
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

65
/* All state for one 'perf record' run. */
struct perf_record {
	struct perf_tool	tool;		/* callbacks; container_of() recovers perf_record */
	struct perf_record_opts	opts;
	u64			bytes_written;	/* event payload bytes written to the output */
	struct perf_data_file	file;		/* perf.data file or stdout pipe */
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;	/* argv[0], used by psignal() reporting */
	int			realtime_prio;	/* nonzero: switch to SCHED_FIFO at this prio */
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;	/* mmap drain count, for wakeup bookkeeping */
	off_t			post_processing_offset;	/* file offset where event data begins */
};
79

80
/*
 * Account for 'size' bytes that were written to the output file on our
 * behalf (e.g. by the tracing data synthesizer), so the header's data
 * size stays correct.
 */
static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

85
static int write_output(struct perf_record *rec, void *buf, size_t size)
86
{
87 88
	struct perf_data_file *file = &rec->file;

89
	while (size) {
90
		int ret = write(file->fd, buf, size);
91

92
		if (ret < 0) {
93
			pr_err("failed to write perf data, error: %m\n");
94 95
			return -1;
		}
96 97 98 99

		size -= ret;
		buf += ret;

100
		rec->bytes_written += ret;
101
	}
102 103

	return 0;
104 105
}

106
/*
 * perf_tool callback: forward a synthesized event straight into the
 * output file.  Returns 0 on success, -1 on write failure.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);

	return write_output(rec, event, event->header.size) < 0 ? -1 : 0;
}

118
/*
 * Drain one mmap'ed ring buffer into the output file, handling the case
 * where the new data wraps around the end of the buffer, then publish
 * the new tail so the kernel can reuse the space.  Returns 0 or -1.
 */
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;	/* skip the control page */
	unsigned long size;
	void *buf;
	int rc = 0;

	/* Nothing new since last drain. */
	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	/*
	 * New data wraps past the end of the ring: write the piece up to
	 * the end of the buffer first...
	 */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* ...then the remainder from the start of the buffer up to head. */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	/* Tell the kernel we consumed up to 'old'; not done on error. */
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

/* Set from signal context, polled by the main loop in __cmd_record(). */
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	/* SIGCHLD: the forked workload terminated on its own. */
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;	/* remember which signal stopped us */
}

175
/*
 * on_exit() handler: terminate (if still running) and reap the forked
 * workload, then restore the default disposition for the signal that
 * stopped us.  SIGUSR1 is treated as a benign "stop recording" request.
 */
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	/* Normal exit, or user-requested stop: nothing to re-deliver. */
	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

195
/*
 * Configure and open every event in the evlist (retrying with
 * perf_evsel__fallback() on failure, e.g. downgrading to a software
 * event), apply event filters, and mmap the ring buffers.
 * Returns 0 on success or a negative error.
 */
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			/* fallback may tweak the attr (e.g. precise_ip) and ask for a retry */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			/* hit the mlock limit for unprivileged users */
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	/* id header size depends on the sample_type of the opened events */
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

251
static int process_buildids(struct perf_record *rec)
252
{
253 254
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
255

256
	u64 size = lseek(file->fd, 0, SEEK_CUR);
257 258 259
	if (size == 0)
		return 0;

260 261
	session->fd = file->fd;
	return __perf_session__process_events(session, rec->post_processing_offset,
262
					      size - rec->post_processing_offset,
263 264 265
					      size, &build_id__mark_dso_hit_ops);
}

266
/*
 * on_exit() handler: on successful exit with a file-backed output,
 * finalize perf.data -- account the data size, collect build-ids,
 * rewrite the header -- and tear down the session.  A pipe output
 * needs none of this.
 */
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	/* Leave the output alone if we are exiting because of an error. */
	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

287
/* machines__process_guests() callback: synthesize mmap events for one guest. */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX.  This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		/* fall back when _text is not exported by the guest */
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

319 320 321 322 323
/*
 * Header-only marker event appended after each full drain of the ring
 * buffers; it lets the report side bound cross-buffer event reordering.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

324
static int perf_record__mmap_read_all(struct perf_record *rec)
325
{
326
	int i;
327
	int rc = 0;
328

329
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
330 331 332 333 334 335
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
336 337
	}

338
	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
339 340 341 342 343
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
344 345
}

346
/*
 * The main recording driver: set up the output (file or pipe) and the
 * session, fork the workload if a command was given, open the events,
 * synthesize the pre-existing state (kernel/module mmaps, threads),
 * then loop draining the ring buffers until the workload exits or a
 * signal arrives.  Returns 0 on success, negative on error.
 */
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, feat;
	unsigned long waking = 0;	/* how often poll() woke us, for the summary */
	const bool forks = argc > 0;	/* remaining argv is the workload command */
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	const char *output_name = file->path;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	/* No -o: default to perf.data, or pipe mode if stdout is a FIFO. */
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			file->is_pipe = true;
		else
			file->path = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			file->is_pipe = true;
		else if (!stat(output_name, &st) && st.st_size) {
			/* keep a backup of a pre-existing non-empty output file */
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	}

	flags = O_CREAT|O_RDWR|O_TRUNC;

	if (file->is_pipe)
		file->fd = STDOUT_FILENO;
	else
		file->fd = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (file->fd < 0) {
		perror("failed to create output file");
		return -1;
	}

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	/* Start with every header feature set, then clear the inapplicable ones. */
	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	/* Fork the workload stopped; it is released further down. */
	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		/* provisional header; rewritten with final sizes at exit */
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	/* Everything from here on is event data; remember where it starts. */
	rec->post_processing_offset = lseek(file->fd, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (file->is_pipe) {
		/* pipe consumers can't seek back, so attrs go inline */
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			/* tracing data was written directly; account for it */
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		/* some kernels export only _stext */
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	/* Synthesize mmap/comm events for the already-running targets. */
	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			/* nothing new arrived; stop if signalled, else sleep */
			if (done)
				break;
			/*
			 * NOTE(review): poll()'s return value is ignored;
			 * EINTR is the expected wakeup when a signal lands.
			 */
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
598

599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

/* One accepted -j/--branch-filter keyword and its PERF_SAMPLE_BRANCH_* bit. */
struct branch_mode {
	const char *name;
	int mode;
};

/* Lookup table for branch filter names; terminated by BRANCH_END. */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

/*
 * Parse a comma-separated branch filter list (-b / -j / --branch-filter)
 * into a PERF_SAMPLE_BRANCH_* bitmask.  If only privilege-level bits
 * were given, the branch type defaults to "any".  Returns 0 on success.
 */
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		/* walk the comma-separated list, one name at a time */
		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

686
#ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Parse the "-g dwarf,<size>" stack dump size: must be a plain number,
 * is rounded up to a multiple of u64, and must be non-zero and fit the
 * sample's 16-bit size field.  On success stores it in *_size and
 * returns 0; otherwise prints an error and returns -1.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
	char *endptr;
	unsigned long size = strtoul(str, &endptr, 0);

	if (!*endptr) {	/* whole string consumed: it was numeric */
		size = round_up(size, sizeof(u64));
		if (size && size <= max_size) {
			*_size = size;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */
713

714 715
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg, int unset)
716
{
717
	struct perf_record_opts *opts = opt->value;
718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* --no-call-graph */
	if (unset)
		return 0;

	/* We specified default option if none is provided. */
	BUG_ON(!arg);

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
743
				opts->call_graph = CALLCHAIN_FP;
744 745 746 747 748 749
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

750
#ifdef HAVE_LIBUNWIND_SUPPORT
751 752
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
753 754
			const unsigned long default_stack_dump_size = 8192;

755
			ret = 0;
756 757
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;
758 759 760 761 762 763

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
764
				opts->stack_dump_size = size;
765 766 767 768
			}

			if (!ret)
				pr_debug("callchain: stack dump size %d\n",
769
					 opts->stack_dump_size);
770
#endif /* HAVE_LIBUNWIND_SUPPORT */
771 772 773 774 775 776 777 778 779 780 781
		} else {
			pr_err("callchain: Unknown -g option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);

	if (!ret)
782
		pr_debug("callchain: type %d\n", opts->call_graph);
783 784 785 786

	return ret;
}

787
/* Usage strings shown by parse_options()/usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

793 794 795 796 797 798 799 800 801 802 803 804 805 806 807
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		/* UINT_MAX / ULLONG_MAX mean "not set by the user" */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,	/* default sampling frequency */
		.target		     = {
			.uses_mmap   = true,
		},
	},
};
814

815 816
/* -g help text; the available modes depend on libunwind support. */
#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif

823 824 825 826 827 828 829
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
			     "mode[,dump_size]", record_callchain_help,
			     &record_parse_callchain_opt, "fp"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_END()
};

899
/*
 * Entry point for 'perf record': parse options, validate the target,
 * resolve sampling frequency vs. period, then hand off to
 * __cmd_record().  Note that usage_with_options() exits the process.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a workload command or an existing target to profile */
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* no -e given: fall back to the default cycles event */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	/* target inconsistencies are only warned about, not fatal */
	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;	/* strerror below may clobber errno */

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}