/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
8 9
#define _FILE_OFFSET_BITS 64

10
#include "builtin.h"
11 12 13

#include "perf.h"

14
#include "util/build-id.h"
15
#include "util/util.h"
16
#include "util/parse-options.h"
17
#include "util/parse-events.h"
18

19
#include "util/header.h"
20
#include "util/event.h"
21
#include "util/evlist.h"
22
#include "util/evsel.h"
23
#include "util/debug.h"
24
#include "util/session.h"
25
#include "util/tool.h"
26
#include "util/symbol.h"
27
#include "util/cpumap.h"
28
#include "util/thread_map.h"
29

30
#include <unistd.h>
31
#include <sched.h>
32
#include <sys/mman.h>
33

34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65
/*
 * Minimal fallback implementation of glibc's on_exit(3) for platforms
 * that lack it: registered callbacks receive the process exit status
 * plus a caller-supplied argument.  The exit() macro below captures the
 * status into __exitcode before calling the real exit(), so the single
 * atexit() trampoline can forward it to every registered callback.
 */
#ifndef HAVE_ON_EXIT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
/* Remember the exit status so __handle_on_exit_funcs() can pass it on. */
#define exit(x) (exit)(__exitcode = (x))

/* Register 'function(arg)' to run at exit; -ENOMEM when the table is full. */
static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		/* First registration: hook our trampoline into atexit(). */
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

/* atexit() trampoline: run callbacks in registration order. */
static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

66 67 68 69 70
/*
 * How an existing perf.data file is treated: overwrite it (default/-f)
 * or append new samples to it (-A).
 */
enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

71
/* All state for one 'perf record' run; embeds the tool callbacks. */
struct perf_record {
	struct perf_tool	tool;		/* callbacks (process_synthesized_event) */
	struct perf_record_opts	opts;		/* parsed recording options */
	u64			bytes_written;	/* payload bytes written to output */
	const char		*output_name;	/* output path, NULL => pipe/"perf.data" */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;
	const char		*progname;	/* argv[0], for psignal() messages */
	int			output;		/* output file descriptor */
	unsigned int		page_size;	/* sysconf(_SC_PAGE_SIZE) */
	int			realtime_prio;	/* nonzero => SCHED_FIFO priority */
	enum write_mode_t	write_mode;
	bool			no_buildid;	/* -B: don't collect buildids */
	bool			no_buildid_cache; /* -N: don't update buildid cache */
	bool			force;		/* -f: overwrite existing file */
	bool			file_new;	/* false when appending (-A) */
	bool			append_file;	/* -A given on command line */
	long			samples;	/* bumped per mmap buffer read */
	off_t			post_processing_offset; /* where sample data starts */
};
91

92
/*
 * Account for 'size' bytes written to the output fd by somebody else
 * (e.g. perf_event__synthesize_tracing_data() writes directly), so the
 * final data_size recorded in the header stays correct.
 */
static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

97
static int write_output(struct perf_record *rec, void *buf, size_t size)
98 99
{
	while (size) {
100
		int ret = write(rec->output, buf, size);
101

102 103 104 105
		if (ret < 0) {
			pr_err("failed to write\n");
			return -1;
		}
106 107 108 109

		size -= ret;
		buf += ret;

110
		rec->bytes_written += ret;
111
	}
112 113

	return 0;
114 115
}

116
/*
 * perf_tool callback invoked for every synthesized event (mmaps, comms,
 * kernel symbols, ...): simply append the raw event to the output file.
 *
 * Returns 0 on success, -1 on write failure.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);

	return write_output(rec, event, event->header.size) < 0 ? -1 : 0;
}

128
/*
 * Drain one event ring buffer: copy everything between the consumer
 * position (md->prev) and the kernel's producer head into the output
 * file, then publish the new tail so the kernel can reuse the space.
 *
 * Returns 0 on success (including "nothing to read"), -1 on write error.
 */
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* Data area starts one page past the mmap base (control page first). */
	unsigned char *data = md->base + rec->page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;	/* buffer empty */

	rec->samples++;

	size = head - old;

	/*
	 * If the region wraps around the end of the ring buffer, write the
	 * tail part (from 'old' to the end of the buffer) first.
	 */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* Write the (remaining) contiguous chunk up to 'head'. */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	/* Publish the consumed position back to the kernel. */
	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

/* Written by sig_handler() below; polled by the main record loop. */
static volatile int done = 0;		/* main loop should stop */
static volatile int signr = -1;		/* signal that caused the stop, -1 = none */
static volatile int child_finished = 0;	/* workload child exited (SIGCHLD seen) */
175

176
/*
 * Shared handler for SIGCHLD/SIGINT/SIGUSR1: note which signal fired and
 * ask the main record loop to terminate.  Only writes to the volatile
 * flags above (async-signal-safe).
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

185
/*
 * on_exit() callback: tear down the forked workload (if any) and, when we
 * are dying because of a signal, re-raise it with the default disposition
 * so the parent sees the true termination cause.
 */
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		/* Child still running: ask it to terminate, then reap it. */
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	/* Normal exit, or SIGUSR1 (treated as a clean stop): nothing to re-raise. */
	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

206 207 208 209 210 211 212 213
static bool perf_evlist__equal(struct perf_evlist *evlist,
			       struct perf_evlist *other)
{
	struct perf_evsel *pos, *pair;

	if (evlist->nr_entries != other->nr_entries)
		return false;

214
	pair = perf_evlist__first(other);
215 216 217 218

	list_for_each_entry(pos, &evlist->entries, node) {
		if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
			return false;
219
		pair = perf_evsel__next(pair);
220 221 222 223 224
	}

	return true;
}

225
/*
 * Configure and open every event in the evlist, apply event filters, and
 * mmap the per-cpu/per-thread ring buffers.  On open failure, tries
 * perf_evsel__fallback() (e.g. hardware -> software event) before giving
 * up with a diagnostic tailored to the errno.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int perf_record__open(struct perf_record *rec)
{
	char msg[128];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
		struct perf_event_attr *attr = &pos->attr;
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				/* Likely perf_event_paranoid restriction. */
				ui__error_paranoid();
				rc = -err;
				goto out;
			} else if (err ==  ENODEV && opts->target.cpu_list) {
				pr_err("No such device - did you specify"
				       " an out-of-range profile CPU?\n");
				rc = -err;
				goto out;
			}

			/* Retry with a downgraded/alternative event if possible. */
			if (perf_evsel__fallback(pos, err, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			if (err == ENOENT) {
				ui__error("The %s event is not supported.\n",
					  perf_evsel__name(pos));
				rc = -err;
				goto out;
			} else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
				ui__error("\'precise\' request may not be supported. "
					  "Try removing 'p' modifier\n");
				rc = -err;
				goto out;
			}

			printf("\n");
			error("sys_perf_event_open() syscall returned with %d "
			      "(%s) for event %s. /bin/dmesg may provide "
			      "additional information.\n",
			      err, strerror(err), perf_evsel__name(pos));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE &&
			    err == EOPNOTSUPP) {
				pr_err("No hardware sampling interrupt available."
				       " No APIC? If so then you can boot the kernel"
				       " with the \"lapic\" boot parameter to"
				       " force-enable it.\n");
				rc = -err;
				goto out;
			}
#endif

			pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			rc = -err;
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else if (!is_power_of_2(opts->mmap_pages) &&
			   (opts->mmap_pages != UINT_MAX)) {
			pr_err("--mmap_pages/-m value must be a power of two.");
			rc = -EINVAL;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	/* Appending: the new evlist must match what is already in the file. */
	if (rec->file_new)
		session->evlist = evlist;
	else {
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			rc = -1;
			goto out;
		}
 	}

	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

336
static int process_buildids(struct perf_record *rec)
337
{
338
	u64 size = lseek(rec->output, 0, SEEK_CUR);
339

340 341 342
	if (size == 0)
		return 0;

343 344 345
	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
346 347 348
					      size, &build_id__mark_dso_hit_ops);
}

349
/*
 * on_exit() callback for a successful run: finalize the perf.data header
 * (data size, build-ids), rewrite it in place, and release the session
 * and evlist.  Does nothing on error exits or in pipe mode (the header
 * was streamed already and there is nothing to seek back into).
 */
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;

	if (status != 0)
		return;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

369
/*
 * perf_session__process_machines() callback: synthesize module and kernel
 * mmap events for one guest machine (skipped for the host, which is
 * handled separately in __cmd_record()).
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;

	if (machine__is_host(machine))
		return;

	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		/* Fall back to _stext when _text is absent from guest kallsyms. */
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

405 406 407 408 409
/*
 * Marker event emitted after each full pass over the mmap buffers (see
 * perf_record__mmap_read_all()); lets the consumer flush events gathered
 * so far.  Only written when the tracing-data header feature is set.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

410
static int perf_record__mmap_read_all(struct perf_record *rec)
411
{
412
	int i;
413
	int rc = 0;
414

415
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
416 417 418 419 420 421
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
422 423
	}

424
	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
425 426 427 428 429
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
430 431
}

432
/*
 * The main body of 'perf record': set up the output file and session,
 * fork the workload (if a command was given), open and mmap the events,
 * synthesize the initial metadata events (attrs, kernel/module mmaps,
 * threads), then loop draining the ring buffers until interrupted or the
 * workload exits.
 *
 * Returns 0 on success, negative on error.  Cleanup of the session and
 * final header rewrite happen via the on_exit() hooks registered here.
 */
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, output, feat;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	rec->page_size = sysconf(_SC_PAGE_SIZE);

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);

	/*
	 * Resolve the output target: explicit "-" or a piped stdout means
	 * pipe mode; otherwise default to "perf.data".
	 */
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			opts->pipe_output = true;
		else
			rec->output_name = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			opts->pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			if (rec->write_mode == WRITE_FORCE) {
				/* Keep the previous data as <name>.old. */
				char oldname[PATH_MAX];
				snprintf(oldname, sizeof(oldname), "%s.old",
					 output_name);
				unlink(oldname);
				rename(output_name, oldname);
			}
		} else if (rec->write_mode == WRITE_APPEND) {
			/* Nothing to append to: fall back to a fresh file. */
			rec->write_mode = WRITE_FORCE;
		}
	}

	flags = O_CREAT|O_RDWR;
	if (rec->write_mode == WRITE_APPEND)
		rec->file_new = 0;
	else
		flags |= O_TRUNC;

	if (opts->pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		return -1;
	}

	rec->output = output;

	session = perf_session__new(output_name, O_WRONLY,
				    rec->write_mode == WRITE_FORCE, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	/* Start with all header features on, then strip the inapplicable ones. */
	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->file_new) {
		/* Appending: validate and load the existing header. */
		err = perf_session__read_header(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, opts, argv);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (opts->pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			goto out_delete_session;
	} else if (rec->file_new) {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	/* Everything after this offset is sample data (see process_buildids()). */
	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		err = -1;
		goto out_delete_session;
	}

	/*
	 * In pipe mode the metadata normally stored in the file header has
	 * to be streamed as synthesized events instead.
	 */
	if (opts->pipe_output) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		err = perf_event__synthesize_event_types(tool, process_synthesized_event,
							 machine);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			/* Tracing data was written directly; account for it. */
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		/* _text can be missing from kallsyms; retry with _stext. */
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest)
		perf_session__process_machines(session, tool,
					       perf_event__synthesize_guest_os);

	if (!opts->target.system_wide)
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	/* Main loop: drain buffers, sleep in poll() while they are empty. */
	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			/* No new data this round; stop if asked, else wait. */
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
705

706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727
/* Maps a -j/--branch-filter token to its PERF_SAMPLE_BRANCH_* flag. */
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

/* Recognized branch-stack filter names; NULL-name entry terminates. */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_END
};

/*
 * Option callback for -b/-j: parse a comma-separated list of branch
 * filter names (see branch_modes[]) into the PERF_SAMPLE_BRANCH_* mask
 * at opt->value.  If only privilege-level bits (u/k/hv) end up set, the
 * branch type defaults to "any".
 *
 * Returns 0 on success, -1 on an unknown filter name, allocation failure
 * or an already-set mask (-b combined with --branch-filter).
 */
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			/* Split on ',' in place and match one token per pass. */
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

790
#ifdef LIBUNWIND_SUPPORT
791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815
/*
 * Parse the dwarf stack-dump size given to "-g dwarf,<size>" into *_size.
 * The value is rounded up to a multiple of sizeof(u64) and must stay
 * within USHRT_MAX (rounded down to the same granularity).
 *
 * Returns 0 and stores the size on success, -1 (with a message) on a
 * malformed number or an out-of-range value.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long parsed;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	parsed = strtoul(str, &endptr, 0);

	if (*endptr == '\0') {
		parsed = round_up(parsed, sizeof(u64));
		if (parsed && parsed <= max_size) {
			*_size = parsed;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
816
#endif /* LIBUNWIND_SUPPORT */
817

818 819
/*
 * Option callback for -g/--call-graph: parse "fp" or (with libunwind)
 * "dwarf[,stack_dump_size]" into opts->call_graph / opts->stack_dump_size.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on an unknown
 * or malformed mode string.
 */
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg, int unset)
{
	struct perf_record_opts *opts = opt->value;
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* --no-call-graph */
	if (unset)
		return 0;

	/* We specified default option if none is provided. */
	BUG_ON(!arg);

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			/* "fp" takes no further arguments. */
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			/* Optional second token overrides the dump size. */
			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}

			if (!ret)
				pr_debug("callchain: stack dump size %d\n",
					 opts->stack_dump_size);
#endif /* LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown -g option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);

	if (!ret)
		pr_debug("callchain: type %d\n", opts->call_graph);

	return ret;
}

891
/* Usage strings shown by usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

897 898 899 900 901 902 903 904 905 906 907 908 909 910 911
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		/* UINT_MAX/ULLONG_MAX mean "not set by the user" (see cmd_record()). */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
	.write_mode = WRITE_FORCE,
	.file_new   = true,
};
920

921 922 923
/* -g option help text; the dwarf mode is only offered with libunwind. */
#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "

#ifdef LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif

929 930 931 932 933 934 935
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &record.append_file,
			    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &record.force,
			"overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
			     "mode[,dump_size]", record_callchain_help,
			     &record_parse_callchain_opt, "fp"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_END()
};

1004
/*
 * Entry point for 'perf record': parse and validate the command-line
 * options, resolve the profiling target and the sampling frequency/period,
 * build the event list and thread/cpu maps, then hand off to
 * __cmd_record() for the actual recording.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* Need either a workload command or an explicit target. */
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	/* -f and -A are mutually exclusive; -A switches to append mode. */
	if (rec->force && rec->append_file) {
		ui__error("Can't overwrite and append at the same time."
			  " You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (rec->append_file) {
		rec->write_mode = WRITE_APPEND;
	} else {
		rec->write_mode = WRITE_FORCE;
	}

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* No -e given: fall back to the default event. */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		/* Inconsistent target options: warn but keep going. */
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_free_fd;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
			goto out_free_fd;
	}

	/* Explicit -c/-F override the built-in defaults. */
	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}