/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

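/*
 * Fallback for libcs that lack the (GNU-specific) on_exit(): handlers
 * registered here are run from an atexit() hook, and the exit status
 * is captured by wrapping exit() so it records __exitcode first.
 */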
#ifndef HAVE_ON_EXIT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

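/*
 * State for one 'perf record' session.  The embedded perf_tool comes
 * first so callbacks handed a perf_tool pointer can recover the whole
 * struct with container_of(), as process_synthesized_event() does.
 */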
struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts	opts;
	u64			bytes_written;
	const char		*output_name;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			output;
	unsigned int		page_size;
	int			realtime_prio;
	enum write_mode_t	write_mode;
	bool			no_buildid;
	bool			no_buildid_cache;
	bool			force;
	bool			file_new;
	bool			append_file;
	long			samples;
	off_t			post_processing_offset;
};

static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

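/*
 * Write the whole buffer to the output file, looping on short writes,
 * and account the bytes in rec->bytes_written for the final report.
 */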
static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	while (size) {
		int ret = write(rec->output, buf, size);

		if (ret < 0) {
			pr_err("failed to write\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

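/*
 * perf_tool callback: synthesized events (mmaps, comms, ...) go into
 * the output file exactly like sampled events.
 */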
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

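/*
 * Drain one mmap'ed ring buffer into the output file.  A region that
 * wraps around the end of the buffer is written as two chunks, and the
 * tail pointer is only advanced after a successful write, so the
 * kernel cannot overwrite data we have not saved yet.
 */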
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + rec->page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

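/*
 * Set from sig_handler() and polled in the __cmd_record() main loop;
 * SIGCHLD additionally flags that the forked workload has exited.
 */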
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

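/*
 * Append mode (-A) sanity check: the attrs of the events being
 * recorded must match those in the existing perf.data header.
 */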
static bool perf_evlist__equal(struct perf_evlist *evlist,
			       struct perf_evlist *other)
{
	struct perf_evsel *pos, *pair;

	if (evlist->nr_entries != other->nr_entries)
		return false;

	pair = perf_evlist__first(other);

	list_for_each_entry(pos, &evlist->entries, node) {
		if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr)) != 0)
			return false;
		pair = perf_evsel__next(pair);
	}

	return true;
}

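/*
 * Open a counter per evsel for the requested CPUs/threads.  On open
 * failure, perf_evsel__fallback() may rewrite the attr to a more
 * portable configuration and we retry; afterwards filters are applied
 * and the per-CPU ring buffers are mmap'ed.
 */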
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else if (!is_power_of_2(opts->mmap_pages) &&
			   (opts->mmap_pages != UINT_MAX)) {
			pr_err("--mmap_pages/-m value must be a power of two.");
			rc = -EINVAL;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	if (rec->file_new)
		session->evlist = evlist;
	else {
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			rc = -1;
			goto out;
		}
	}

	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

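/*
 * Re-read the events written so far and mark the DSOs that got hits,
 * so that only build-ids of DSOs actually sampled end up in the
 * header features.
 */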
static int process_buildids(struct perf_record *rec)
{
	u64 size = lseek(rec->output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;

	if (status != 0)
		return;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * When processing the record & report subcommands for a guest
	 * kernel, we synthesize the module mmap events before the guest
	 * kernel mmap and trigger a DSO preload, because by default guest
	 * module symbols are loaded from the guest's kallsyms instead of
	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the
	 * first sampled address falls in a module rather than in the
	 * guest kernel itself.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We try _text first and fall back to _stext because the guest
	 * kernel's /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

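/*
 * Marker written after each full pass over the mmap buffers; the
 * report side treats it as a barrier up to which buffered events can
 * safely be sorted and flushed.
 */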
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

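/*
 * The bulk of the record session: set up the output file and header,
 * synthesize the pre-existing state (kernel, modules, threads), then
 * loop draining the mmap buffers until the workload finishes or we
 * are interrupted.
 */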
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, output, feat;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	rec->page_size = sysconf(_SC_PAGE_SIZE);

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);

	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			opts->pipe_output = true;
		else
			rec->output_name = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			opts->pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			if (rec->write_mode == WRITE_FORCE) {
				char oldname[PATH_MAX];
				snprintf(oldname, sizeof(oldname), "%s.old",
					 output_name);
				unlink(oldname);
				rename(output_name, oldname);
			}
		} else if (rec->write_mode == WRITE_APPEND) {
			rec->write_mode = WRITE_FORCE;
		}
	}

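	/*
	 * In append mode the existing file is opened read-write and its
	 * header re-read below; otherwise it is truncated on open.
	 */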
	flags = O_CREAT|O_RDWR;
	if (rec->write_mode == WRITE_APPEND)
		rec->file_new = 0;
	else
		flags |= O_TRUNC;

	if (opts->pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		return -1;
	}

	rec->output = output;

	session = perf_session__new(output_name, O_WRONLY,
				    rec->write_mode == WRITE_FORCE, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;
	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->file_new) {
		err = perf_session__read_header(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, opts->pipe_output,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (opts->pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			goto out_delete_session;
	} else if (rec->file_new) {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);

	machine = &session->machines.host;

	if (opts->pipe_output) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		err = perf_event__synthesize_event_types(tool, process_synthesized_event,
							 machine);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	if (perf_target__has_task(&opts->target))
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else if (perf_target__has_cpu(&opts->target))
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);
	else /* command specified */
		err = 0;

	if (err != 0)
		goto out_delete_session;

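	/*
	 * Everything is set up: raise the scheduling priority if
	 * requested, then run the workload and drain the mmap buffers
	 * until it is done or we are interrupted.
	 */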
	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* LIBUNWIND_SUPPORT */
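
/*
 * Parses the -g/--call-graph argument.  Accepted forms (dwarf only
 * when built with libunwind support):
 *
 *   fp            frame-pointer based unwinding (the default)
 *   dwarf         DWARF unwinding, default 8192 byte stack dump
 *   dwarf,4096    DWARF unwinding, 4096 byte stack dump
 */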
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg, int unset)
{
	struct perf_record_opts *opts = opt->value;
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* --no-call-graph */
	if (unset)
		return 0;

	/* We specified default option if none is provided. */
	BUG_ON(!arg);

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}

			if (!ret)
				pr_debug("callchain: stack dump size %d\n",
					 opts->stack_dump_size);
#endif /* LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown -g option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);

	if (!ret)
		pr_debug("callchain: type %d\n", opts->call_graph);

	return ret;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
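
/*
 * Typical invocations (illustrative only; event availability depends
 * on the hardware and kernel):
 *
 *   perf record -F 4000 -g -- ./workload args
 *   perf record -a -e cycles sleep 5
 *   perf record -p 1234 -o pid.data
 */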

/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
	.write_mode = WRITE_FORCE,
	.file_new   = true,
};

#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "

#ifdef LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &record.append_file,
			    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &record.force,
			"overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
			     "mode[,dump_size]", record_callchain_help,
			     &record_parse_callchain_opt, "fp"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_END()
};

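/*
 * Entry point for 'perf record': parse the options, validate the
 * target (pid/tid/cpu/uid), resolve the frequency-vs-period policy
 * and hand off to __cmd_record().
 */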
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (rec->force && rec->append_file) {
		ui__error("Can't overwrite and append at the same time."
			  " You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (rec->append_file) {
		rec->write_mode = WRITE_APPEND;
	} else {
		rec->write_mode = WRITE_FORCE;
	}

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
			goto out_free_fd;
	}

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}