builtin-record.c 28.3 KB
Newer Older
I
Ingo Molnar 已提交
1
/*
2 3 4 5 6
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
I
Ingo Molnar 已提交
7
 */
8 9
#define _FILE_OFFSET_BITS 64

10
#include "builtin.h"
11 12 13

#include "perf.h"

14
#include "util/build-id.h"
15
#include "util/util.h"
16
#include "util/parse-options.h"
17
#include "util/parse-events.h"
18

19
#include "util/header.h"
20
#include "util/event.h"
21
#include "util/evlist.h"
22
#include "util/evsel.h"
23
#include "util/debug.h"
24
#include "util/session.h"
25
#include "util/tool.h"
26
#include "util/symbol.h"
27
#include "util/cpumap.h"
28
#include "util/thread_map.h"
29

30
#include <unistd.h>
31
#include <sched.h>
32
#include <sys/mman.h>
33

34 35
/* Common prefix for the -g/--call-graph option help text. */
#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "

#ifdef LIBUNWIND_SUPPORT
/* Default user-stack dump size (bytes) for dwarf unwinding. */
static unsigned long default_stack_dump_size = 8192;
/* dwarf mode is only offered when built with libunwind. */
static char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
static char callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif

43 44 45 46 47
enum write_mode_t {
	WRITE_FORCE,	/* overwrite any existing output file */
	WRITE_APPEND	/* append to an existing output file (-A) */
};

/* All state for one 'perf record' run. */
struct perf_record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct perf_record_opts	opts;
	u64			bytes_written;	/* event payload bytes written so far */
	const char		*output_name;	/* output file name; NULL until resolved */
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;	/* argv[0], used by psignal() on child death */
	int			output;		/* output file descriptor */
	unsigned int		page_size;	/* runtime page size (mmap data offset) */
	int			realtime_prio;	/* nonzero => switch to SCHED_FIFO */
	enum write_mode_t	write_mode;
	bool			no_buildid;
	bool			no_buildid_cache;
	bool			force;
	bool			file_new;	/* fresh output file, not an append */
	bool			append_file;
	long			samples;	/* samples flushed since last poll */
	off_t			post_processing_offset;	/* file offset where event data starts */
};
68

69
/*
 * Account for bytes that some other code path (e.g. tracing data
 * synthesis) already wrote directly to the output fd.
 */
static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}

74
static int write_output(struct perf_record *rec, void *buf, size_t size)
75 76
{
	while (size) {
77
		int ret = write(rec->output, buf, size);
78

79 80 81 82
		if (ret < 0) {
			pr_err("failed to write\n");
			return -1;
		}
83 84 85 86

		size -= ret;
		buf += ret;

87
		rec->bytes_written += ret;
88
	}
89 90

	return 0;
91 92
}

93
/*
 * perf_tool callback: forward a synthesized event straight to the
 * output file. Returns 0 on success, -1 on write failure.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);

	/* write_output() already returns 0 or -1, so pass it through. */
	return write_output(rec, event, event->header.size);
}

105
/*
 * Drain one mmap'ed ring buffer to the output file.
 *
 * The kernel writes events into a circular buffer; we copy everything
 * between our last position (md->prev) and the kernel's head. If the
 * region wraps around the end of the buffer it is written in two
 * chunks. Returns 0 on success, -1 on write failure.
 */
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* Data starts one page past the base (first page is the control page). */
	unsigned char *data = md->base + rec->page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;	/* nothing new */

	rec->samples++;

	size = head - old;

	/* Region wraps past the end of the ring: write the tail chunk first. */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* Remaining (or whole, if unwrapped) chunk. */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	/* Tell the kernel we consumed up to 'old' so it can reuse the space. */
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;		/* set by any signal: stop the record loop */
static volatile int signr = -1;		/* last signal seen; re-raised at exit */
static volatile int child_finished = 0;	/* workload child exited (SIGCHLD seen) */

/* Handler for SIGCHLD/SIGINT/SIGUSR1: just record the fact and return. */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

162
/*
 * on_exit() handler: reap (and if needed terminate) the forked
 * workload, then re-raise the fatal signal with default disposition so
 * our own exit status reflects it.
 */
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	/* Normal exit, or SIGUSR1 (used to request a clean stop): done. */
	if (signr == -1 || signr == SIGUSR1)
		return;

	/* Die by the same signal so callers see the real cause. */
	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

183 184 185 186 187 188 189 190
static bool perf_evlist__equal(struct perf_evlist *evlist,
			       struct perf_evlist *other)
{
	struct perf_evsel *pos, *pair;

	if (evlist->nr_entries != other->nr_entries)
		return false;

191
	pair = perf_evlist__first(other);
192 193 194 195

	list_for_each_entry(pos, &evlist->entries, node) {
		if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
			return false;
196
		pair = perf_evsel__next(pair);
197 198 199 200 201
	}

	return true;
}

202
/*
 * Open all counters, apply filters and mmap the ring buffers.
 *
 * Contains the legacy-kernel fallback ladder: retries without
 * exclude_guest/exclude_host, without sample_id_all, and falls back
 * from the hw cycles event to the sw cpu-clock event where needed.
 * Returns 0 on success, negative errno-style value on failure.
 */
static int perf_record__open(struct perf_record *rec)
{
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config_attrs(evlist, opts);

	if (opts->group)
		perf_evlist__set_leader(evlist);

	list_for_each_entry(pos, &evlist->entries, node) {
		struct perf_event_attr *attr = &pos->attr;
		/*
		 * Check if parse_single_tracepoint_event has already asked for
		 * PERF_SAMPLE_TIME.
		 *
		 * XXX this is kludgy but short term fix for problems introduced by
		 * eac23d1c that broke 'perf script' by having different sample_types
		 * when using multiple tracepoint events when we use a perf binary
		 * that tries to use sample_id_all on an older kernel.
		 *
		 * We need to move counter creation to perf_session, support
		 * different sample_types, etc.
		 */
		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;

fallback_missing_features:
		if (opts->exclude_guest_missing)
			attr->exclude_guest = attr->exclude_host = 0;
retry_sample_id:
		attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				ui__error_paranoid();
				rc = -err;
				goto out;
			} else if (err ==  ENODEV && opts->target.cpu_list) {
				pr_err("No such device - did you specify"
				       " an out-of-range profile CPU?\n");
				rc = -err;
				goto out;
			} else if (err == EINVAL) {
				/* Older kernels: retry without newer attr bits. */
				if (!opts->exclude_guest_missing &&
				    (attr->exclude_guest || attr->exclude_host)) {
					pr_debug("Old kernel, cannot exclude "
						 "guest or host samples.\n");
					opts->exclude_guest_missing = true;
					goto fallback_missing_features;
				} else if (!opts->sample_id_all_missing) {
					/*
					 * Old kernel, no attr->sample_id_type_all field
					 */
					opts->sample_id_all_missing = true;
					if (!opts->sample_time && !opts->raw_samples && !time_needed)
						attr->sample_type &= ~PERF_SAMPLE_TIME;

					goto retry_sample_id;
				}
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support.
			 *
			 * PPC returns ENXIO until 2.6.37 (behavior changed
			 * with commit b0a873e).
			 */
			if ((err == ENOENT || err == ENXIO)
					&& attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					ui__warning("The cycles event is not supported, "
						    "trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				/* Drop the cached name so it is regenerated. */
				if (pos->name) {
					free(pos->name);
					pos->name = NULL;
				}
				goto try_again;
			}

			if (err == ENOENT) {
				ui__error("The %s event is not supported.\n",
					  perf_evsel__name(pos));
				rc = -err;
				goto out;
			}

			printf("\n");
			error("sys_perf_event_open() syscall returned with %d "
			      "(%s) for event %s. /bin/dmesg may provide "
			      "additional information.\n",
			      err, strerror(err), perf_evsel__name(pos));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE &&
			    err == EOPNOTSUPP) {
				pr_err("No hardware sampling interrupt available."
				       " No APIC? If so then you can boot the kernel"
				       " with the \"lapic\" boot parameter to"
				       " force-enable it.\n");
				rc = -err;
				goto out;
			}
#endif

			pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			rc = -err;
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else if (!is_power_of_2(opts->mmap_pages)) {
			pr_err("--mmap_pages/-m value must be a power of two.");
			rc = -EINVAL;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	/* For appends, the new event list must match the file's. */
	if (rec->file_new)
		session->evlist = evlist;
	else {
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			rc = -1;
			goto out;
		}
 	}

	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

363
/*
 * Re-read the recorded events and mark the DSOs that got hits, so
 * only their build-ids end up in the feature section of the header.
 */
static int process_buildids(struct perf_record *rec)
{
	u64 size = lseek(rec->output, 0, SEEK_CUR);

	if (size == 0)
		return 0;	/* nothing was recorded */

	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

376
/*
 * on_exit() handler: finalize the perf.data file on successful exit —
 * account the data size, collect build-ids, rewrite the header and
 * release the session/evlist. No-op for pipe output or failed runs.
 */
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;

	if (status != 0)
		return;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

396
/*
 * Per-machine callback: synthesize module and kernel mmap events for a
 * guest machine so its samples can be resolved at report time. Host
 * machines are skipped (handled separately in __cmd_record()).
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;

	if (machine__is_host(machine))
		return;

	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

432 433 434 435 436
/* Marker event: everything before it in the file is one flush round. */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

/*
 * Drain every mmap'ed ring buffer, then emit a FINISHED_ROUND marker
 * (only when tracing data is present, which needs round ordering).
 * Returns 0 on success, -1 on write failure.
 */
static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}

459
/*
 * The actual record run: resolve the output target, create the
 * session, open counters, synthesize the initial metadata events,
 * start the workload and pump the ring buffers until done.
 *
 * Returns 0 on success, -1 or a negative error code on failure.
 */
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err, output, feat;
	unsigned long waking = 0;
	const bool forks = argc > 0;	/* remaining argv is a workload to fork */
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	const char *output_name = rec->output_name;
	struct perf_session *session;

	rec->progname = argv[0];

	rec->page_size = sysconf(_SC_PAGE_SIZE);

	/* Registered before the session exit handler: reaps the workload. */
	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);

	/* No -o: pipe mode if stdout is a fifo, else default "perf.data". */
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			opts->pipe_output = true;
		else
			rec->output_name = output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			opts->pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			/* Keep the previous data as <name>.old when forcing. */
			if (rec->write_mode == WRITE_FORCE) {
				char oldname[PATH_MAX];
				snprintf(oldname, sizeof(oldname), "%s.old",
					 output_name);
				unlink(oldname);
				rename(output_name, oldname);
			}
		} else if (rec->write_mode == WRITE_APPEND) {
			/* Nothing to append to: fall back to a fresh file. */
			rec->write_mode = WRITE_FORCE;
		}
	}

	flags = O_CREAT|O_RDWR;
	if (rec->write_mode == WRITE_APPEND)
		rec->file_new = 0;
	else
		flags |= O_TRUNC;

	if (opts->pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		return -1;
	}

	rec->output = output;

	session = perf_session__new(output_name, O_WRONLY,
				    rec->write_mode == WRITE_FORCE, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	/* Start with every header feature on, then clear the inapplicable. */
	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->file_new) {
		err = perf_session__read_header(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, opts, argv);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (opts->pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			goto out_delete_session;
	} else if (rec->file_new) {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	/* Everything after this offset is sample data (see process_buildids). */
	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		err = -1;
		goto out_delete_session;
	}

	/* Pipe mode: metadata must flow inline as synthesized events. */
	if (opts->pipe_output) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		err = perf_event__synthesize_event_types(tool, process_synthesized_event,
							 machine);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			advance_output(rec, err);
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest)
		perf_session__process_machines(session, tool,
					       perf_event__synthesize_guest_os);

	if (!opts->target.system_wide)
		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
						  process_synthesized_event,
						  machine);
	else
		err = perf_event__synthesize_threads(tool, process_synthesized_event,
					       machine);

	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	/* Main loop: drain ring buffers, sleep in poll() when idle. */
	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/* Stop counting but keep draining until buffers are empty. */
		if (done)
			perf_evlist__disable(evsel_list);
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		output_name,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
718

719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

/* One -j/--branch-filter keyword and its PERF_SAMPLE_BRANCH_* bit. */
struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_END
};

/*
 * Option callback for -b/-j: parse a comma-separated list of branch
 * filter keywords into the PERF_SAMPLE_BRANCH_* mask at opt->value.
 * Returns 0 on success, -1 on an unknown keyword or duplicate use.
 */
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

803
#ifdef LIBUNWIND_SUPPORT
/*
 * Parse the dump-size argument of "-g dwarf,<size>": the size is
 * rounded up to a u64 multiple and must stay within USHRT_MAX.
 * On success stores the result in *_size and returns 0; else -1.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	/* Whole string must be numeric. */
	if (*endptr == '\0') {
		size = round_up(size, sizeof(u64));
		if (size && size <= max_size) {
			*_size = size;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* LIBUNWIND_SUPPORT */
830 831

static int
832
parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867
		    int unset)
{
	struct perf_record *rec = (struct perf_record *)opt->value;
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* --no-call-graph */
	if (unset)
		return 0;

	/* We specified default option if none is provided. */
	BUG_ON(!arg);

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				rec->opts.call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

868
#ifdef LIBUNWIND_SUPPORT
869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			ret = 0;
			rec->opts.call_graph = CALLCHAIN_DWARF;
			rec->opts.stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				rec->opts.stack_dump_size = size;
			}

			if (!ret)
				pr_debug("callchain: stack dump size %d\n",
					 rec->opts.stack_dump_size);
886
#endif /* LIBUNWIND_SUPPORT */
887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902
		} else {
			pr_err("callchain: Unknown -g option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);

	if (!ret)
		pr_debug("callchain: type %d\n", rec->opts.call_graph);

	return ret;
}

903
/* Usage strings shown by usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

909 910 911 912 913 914 915 916 917 918 919 920 921 922 923
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		/* UINT_MAX/ULLONG_MAX mean "not set by the user". */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
	.write_mode = WRITE_FORCE,
	.file_new   = true,
};
932

933 934 935 936 937 938 939
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &record.append_file,
			    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &record.force,
			"overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]",
			     callchain_help, &parse_callchain_opt,
			     "fp"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_END()
};

1008
/*
 * Entry point for 'perf record': parse options, validate the target,
 * build the event list and maps, then hand off to __cmd_record().
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* Need either a workload to run or an existing target to attach to. */
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (rec->force && rec->append_file) {
		ui__error("Can't overwrite and append at the same time."
			  " You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (rec->append_file) {
		rec->write_mode = WRITE_APPEND;
	} else {
		rec->write_mode = WRITE_FORCE;
	}

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* No -e given: fall back to the default event (cycles). */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_free_fd;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
			goto out_free_fd;
	}

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}