/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
8
#include "builtin.h"
9 10 11

#include "perf.h"

12
#include "util/build-id.h"
13
#include "util/util.h"
14
#include "util/parse-options.h"
15
#include "util/parse-events.h"
16

17
#include "util/header.h"
18
#include "util/event.h"
19
#include "util/evlist.h"
20
#include "util/evsel.h"
21
#include "util/debug.h"
22
#include "util/session.h"
23
#include "util/tool.h"
24
#include "util/symbol.h"
25
#include "util/cpumap.h"
26
#include "util/thread_map.h"
27

28
#include <unistd.h>
29
#include <sched.h>
30
#include <sys/mman.h>
31

32
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
/*
 * Minimal on_exit(3) emulation for libcs that lack it.  Callbacks get
 * the process exit status; since atexit() handlers don't, we capture
 * it by wrapping exit() in a macro that records its argument first.
 */
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

/* Register 'function' to run at exit; 0 on success, -ENOMEM when full. */
static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		/* install the single atexit() trampoline on first use */
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

/* atexit() trampoline: run handlers in registration order. */
static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

64
/*
 * Per-invocation state of 'perf record'.  The embedded perf_tool is
 * handed to event-synthesis callbacks, which recover this struct via
 * container_of().
 */
struct perf_record {
	struct perf_tool	tool;		/* callbacks; container_of() anchor */
	struct perf_record_opts	opts;		/* command-line recording options */
	u64			bytes_written;	/* event payload bytes written so far */
	const char		*output_name;	/* perf.data path; NULL until defaulted */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;
	const char		*progname;	/* argv[0], used by psignal() reporting */
	int			output;		/* output file descriptor */
	int			realtime_prio;	/* nonzero: SCHED_FIFO priority to set */
	bool			no_buildid;	/* -B: don't collect buildids */
	bool			no_buildid_cache; /* -N: don't update buildid cache */
	long			samples;	/* counts non-empty mmap reads */
	off_t			post_processing_offset;	/* file offset where event data starts */
};
79

80
/*
 * Account for 'size' bytes written to the output by a helper that
 * bypassed write_output() (e.g. synthesized tracing data), keeping
 * rec->bytes_written — and thus the header data size — accurate.
 */
static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

85
static int write_output(struct perf_record *rec, void *buf, size_t size)
86 87
{
	while (size) {
88
		int ret = write(rec->output, buf, size);
89

90
		if (ret < 0) {
91
			pr_err("failed to write perf data, error: %m\n");
92 93
			return -1;
		}
94 95 96 97

		size -= ret;
		buf += ret;

98
		rec->bytes_written += ret;
99
	}
100 101

	return 0;
102 103
}

104
/*
 * perf_tool callback: forward a synthesized event straight to the
 * output file.  Returns 0 on success, -1 on write failure.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);

	return write_output(rec, event, event->header.size) < 0 ? -1 : 0;
}

116
/*
 * Drain one mmap'ed event ring buffer into the output file.  The
 * unread region [prev, head) may wrap past the end of the buffer, in
 * which case it is written in two chunks.  Only after all data is
 * safely written is the tail pointer advanced, telling the kernel the
 * space may be reused.
 *
 * Returns 0 on success (including "nothing to read"), -1 on write error.
 */
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;	/* skip the control page */
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;	/* nothing new since last read */

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		/* region wraps: first write from 'old' to end of buffer */
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* linear (remaining) region up to 'head' */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	/* publish consumption to the kernel only after a successful write */
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

/* Flags shared between the signal handler and the main record loop. */
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

/*
 * Async-signal handler: note which signal fired and ask the main loop
 * to wind down.  SIGCHLD additionally marks the forked workload done.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	signr = sig;
	done = 1;
}

173
/*
 * on_exit() handler: terminate and reap the forked workload (if any)
 * and, when we are dying from a signal, restore that signal's default
 * disposition.
 */
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			/* report e.g. "perf: Terminated" to the user */
			psignal(WTERMSIG(status), rec->progname);
	}

	/* normal exit, or SIGUSR1 (treated as benign): nothing to restore */
	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

193
/*
 * Configure, open and mmap every event in the evlist.  When the
 * kernel rejects an event attribute, perf_evsel__fallback() may
 * rewrite it to a softer encoding, in which case the open is retried.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				/* attribute was rewritten; retry the open */
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			/* mlock limit exceeded; tell the user how to fix it */
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

249
static int process_buildids(struct perf_record *rec)
250
{
251
	u64 size = lseek(rec->output, 0, SEEK_CUR);
252

253 254 255
	if (size == 0)
		return 0;

256 257 258
	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
259 260 261
					      size, &build_id__mark_dso_hit_ops);
}

262
/*
 * on_exit() handler for a successful run: fix up the perf.data header
 * (final data size, build-ids) and release session and evlist.  When
 * piping to stdout there is no header to rewrite, so nothing is done.
 */
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;

	if (status != 0)
		return;		/* abnormal exit: leave the file as-is */

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

282
/*
 * Synthesize module and kernel mmap events for one guest machine so
 * guest-side samples can be resolved at report time.  Used as the
 * machines__process_guests() callback; 'data' is the perf_tool.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

314 315 316 317 318
/*
 * Marker event written after each full pass over the mmaps; lets the
 * report side order samples accumulated between two rounds.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

319
static int perf_record__mmap_read_all(struct perf_record *rec)
320
{
321
	int i;
322
	int rc = 0;
323

324
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
325 326 327 328 329 330
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
331 332
	}

333
	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
334 335 336 337 338
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
339 340
}

341
/*
 * The main recording driver: set up the output file and session, open
 * and mmap the events, synthesize the initial metadata events, start
 * the workload (if any) and loop draining the ring buffers until told
 * to stop.  Returns 0 on success, negative on error.
 */
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, output, feat;
	unsigned long waking = 0;	/* times poll() woke us, for the summary */
	const bool forks = argc > 0;	/* non-option args == workload command */
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	/* Resolve the output destination: pipe, explicit file or perf.data. */
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			opts->pipe_output = true;
		else
			rec->output_name = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			opts->pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			/* keep the previous data file around as <name>.old */
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	}

	flags = O_CREAT|O_RDWR|O_TRUNC;

	if (opts->pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		return -1;
	}

	rec->output = output;

	session = perf_session__new(output_name, O_WRONLY,
				    true, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	/* Start with all header features set, then clear the inapplicable. */
	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, opts->pipe_output,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (opts->pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	/* Event data begins here; process_buildids() re-reads from this point. */
	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (opts->pipe_output) {
		/* A pipe carries no header, so send attrs etc. as events. */
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	/* _text is preferred; fall back to _stext for restricted kallsyms. */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			/* nothing was read: either stop, or sleep until data */
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
595

596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613
/* Map -b/-j option names to PERF_SAMPLE_BRANCH_* mask bits. */
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

/* NULL-name terminated; scanned case-insensitively by parse_branch_stack(). */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

static int
621
parse_branch_stack(const struct option *opt, const char *str, int unset)
622 623 624 625 626 627 628 629
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
630
	char *s, *os = NULL, *p;
631 632
	int ret = -1;

633 634
	if (unset)
		return 0;
635

636 637 638 639
	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
640 641
		return -1;

642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662
	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}
663

664
			*mode |= br->mode;
665

666 667
			if (!p)
				break;
668

669 670
			s = p + 1;
		}
671 672 673
	}
	ret = 0;

674
	/* default to any branch */
675
	if ((*mode & ~ONLY_PLM) == 0) {
676
		*mode = PERF_SAMPLE_BRANCH_ANY;
677 678 679 680 681 682
	}
error:
	free(os);
	return ret;
}

683
#ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Parse the user supplied stack dump size, rounding it up to a
 * multiple of sizeof(u64) and capping it at the maximum the sample
 * format can carry.  Returns 0 and stores into *_size on success,
 * -1 after printing an error otherwise.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	if (!*endptr) {
		size = round_up(size, sizeof(u64));
		if (size && size <= max_size) {
			*_size = size;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */
710

711 712
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg, int unset)
713
{
714
	struct perf_record_opts *opts = opt->value;
715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* --no-call-graph */
	if (unset)
		return 0;

	/* We specified default option if none is provided. */
	BUG_ON(!arg);

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
740
				opts->call_graph = CALLCHAIN_FP;
741 742 743 744 745 746
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

747
#ifdef HAVE_LIBUNWIND_SUPPORT
748 749
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
750 751
			const unsigned long default_stack_dump_size = 8192;

752
			ret = 0;
753 754
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;
755 756 757 758 759 760

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
761
				opts->stack_dump_size = size;
762 763 764 765
			}

			if (!ret)
				pr_debug("callchain: stack dump size %d\n",
766
					 opts->stack_dump_size);
767
#endif /* HAVE_LIBUNWIND_SUPPORT */
768 769 770 771 772 773 774 775 776 777 778
		} else {
			pr_err("callchain: Unknown -g option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);

	if (!ret)
779
		pr_debug("callchain: type %d\n", opts->call_graph);
780 781 782 783

	return ret;
}

784
/* Usage strings printed by -h and on option errors. */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

790 791 792 793 794 795 796 797 798 799 800 801 802 803 804
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		/* UINT_MAX/ULLONG_MAX mean "not set by the user" */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};
811

812 813
#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "

/* Advertise "dwarf" only when built with libunwind support. */
#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif

820 821 822 823 824 825 826
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
827
const struct option record_options[] = {
828
	OPT_CALLBACK('e', "event", &record.evlist, "event",
829
		     "event selector. use 'perf list' to list available events",
830
		     parse_events_option),
831
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
L
Li Zefan 已提交
832
		     "event filter", parse_filter),
833
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
834
		    "record events on existing process id"),
835
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
836
		    "record events on existing thread id"),
837
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
838
		    "collect data with this RT SCHED_FIFO priority"),
839
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
840
		    "collect data without buffering"),
841
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
842
		    "collect raw sample records from all opened counters"),
843
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
844
			    "system-wide collection from all CPUs"),
845
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
846
		    "list of cpus to monitor"),
847 848
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
I
Ingo Molnar 已提交
849
		    "output file name"),
850
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
851
		    "child tasks do not inherit counters"),
852
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
853 854 855
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
856
	OPT_BOOLEAN(0, "group", &record.opts.group,
857
		    "put the counters into a counter group"),
858
	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
859 860
			     "mode[,dump_size]", record_callchain_help,
			     &record_parse_callchain_opt, "fp"),
861
	OPT_INCR('v', "verbose", &verbose,
862
		    "be more verbose (show counter open errors, etc)"),
863
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
864
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
865
		    "per thread counts"),
866
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
867
		    "Sample addresses"),
868
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
869
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
870
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
871
		    "don't sample"),
872
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
873
		    "do not update the buildid cache"),
874
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
875
		    "do not collect buildids in perf.data"),
876
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
S
Stephane Eranian 已提交
877 878
		     "monitor event in cgroup name only",
		     parse_cgroups),
879 880
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),
881 882 883 884 885 886 887

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
888
		     parse_branch_stack),
889 890
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
891 892
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
893 894 895
	OPT_END()
};

896
/*
 * Entry point of 'perf record': parse and validate the command line,
 * resolve the target and default event, then hand off to
 * __cmd_record().  Returns 0 on success, negative on error.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a workload command or an existing target to record */
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* no -e given: fall back to the default event */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}