/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

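/*
 * Fallback for libcs without on_exit(): exit() is wrapped so the exit
 * code can be recorded, and a single atexit() hook replays the
 * registered handlers. Note that, unlike glibc's on_exit(), handlers
 * run here in registration order rather than in reverse.
 */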
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

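/*
 * Per-invocation state of 'perf record': tool callbacks, recording
 * options, event list, session and output file, plus bookkeeping for
 * the amount of data written.
 */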
struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts	opts;
	u64			bytes_written;
	const char		*output_name;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			output;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
	off_t			post_processing_offset;
};

static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

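/*
 * Append a block of data to the output file, looping on short writes
 * and accounting the bytes in rec->bytes_written.
 */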
static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	while (size) {
		int ret = write(rec->output, buf, size);

		if (ret < 0) {
			pr_err("failed to write\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

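/*
 * Tool callback used when synthesizing events (kernel mmaps, threads,
 * tracing data, ...): just append the event to the output file.
 */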
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);

	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

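/*
 * Drain one mmap'ed ring buffer: write everything between our last
 * read position (md->prev) and the kernel's head to the output file,
 * splitting the copy in two when the data wraps around the end of the
 * ring, then publish the new tail so the kernel can reuse the space.
 */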
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

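/* Written by the signal handlers, polled by the main record loop. */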
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

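/*
 * on_exit() handler: terminate and reap the forked workload if it is
 * still running; when exiting because of a fatal signal, restore that
 * signal's default disposition.
 */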
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

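/*
 * Configure and open all counters in the event list, letting
 * perf_evsel__fallback() retry with a softer setup when the initial
 * open fails, then apply the event filters and mmap the ring buffers.
 */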
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else if (!is_power_of_2(opts->mmap_pages) &&
			   (opts->mmap_pages != UINT_MAX)) {
			pr_err("--mmap_pages/-m value must be a power of two.\n");
			rc = -EINVAL;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

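/*
 * Re-read the recorded events so the build-id machinery can mark which
 * DSOs were actually hit before the header is written out.
 */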
static int process_buildids(struct perf_record *rec)
{
	u64 size = lseek(rec->output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

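/*
 * on_exit() handler for the normal path: on success, finalize the data
 * size, post-process build-ids and rewrite the file header in place.
 */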
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;

	if (status != 0)
		return;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernels, when processing the record and report
	 * subcommands we synthesize the module mmaps prior to the guest
	 * kernel mmap and trigger a DSO preload, because guest module
	 * symbols are loaded from guest kallsyms by default instead of
	 * from /lib/modules/XXX/XXX. This avoids missing symbols when
	 * the first address falls in a module rather than in the guest
	 * kernel itself.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

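/*
 * A PERF_RECORD_FINISHED_ROUND event marks a point in the file up to
 * which buffered events can safely be flushed and reordered by time.
 */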
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

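/*
 * Flush every per-cpu/per-thread ring buffer and, when tracing data is
 * being recorded, append a finished-round marker to the output.
 */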
static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

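/*
 * The guts of 'perf record': set up the output file and session header,
 * open the counters, synthesize the pre-existing state (kernel and
 * module mmaps, threads), start the workload and then drain the ring
 * buffers until the workload exits or we are interrupted.
 */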
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, output, feat;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			opts->pipe_output = true;
		else
			rec->output_name = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			opts->pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	}

	flags = O_CREAT|O_RDWR|O_TRUNC;

	if (opts->pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		return -1;
	}

	rec->output = output;

	session = perf_session__new(output_name, O_WRONLY,
				    true, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, opts->pipe_output,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (opts->pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (opts->pipe_output) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

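/*
 * Branch stack filter modes accepted by -b/-j, mapped to their
 * PERF_SAMPLE_BRANCH_* bits.
 */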
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
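/*
 * Parse the stack dump size given to '-g dwarf,<size>', rounding it up
 * to a multiple of u64 and rejecting zero or anything above the
 * u64-aligned USHRT_MAX.
 */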
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */

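/*
 * Parse -g/--call-graph: "fp" selects framepointer-based callchains;
 * when built with libunwind, "dwarf[,<dump size>]" selects dwarf
 * unwinding with an optional user stack dump size (default 8192).
 */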
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg, int unset)
{
	struct perf_record_opts *opts = opt->value;
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* --no-call-graph */
	if (unset)
		return 0;

	/* We specified default option if none is provided. */
	BUG_ON(!arg);

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}

			if (!ret)
				pr_debug("callchain: stack dump size %d\n",
					 opts->stack_dump_size);
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown -g option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);

	if (!ret)
		pr_debug("callchain: type %d\n", opts->call_graph);

	return ret;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};

#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
			     "mode[,dump_size]", record_callchain_help,
			     &record_parse_callchain_opt, "fp"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_END()
};

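/*
 * Entry point of 'perf record': parse options, validate the target,
 * apply the frequency/period defaults and hand off to __cmd_record().
 */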
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}