/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
8 9
#define _FILE_OFFSET_BITS 64

10
#include "builtin.h"
11 12 13

#include "perf.h"

14
#include "util/build-id.h"
15
#include "util/util.h"
16
#include "util/parse-options.h"
17
#include "util/parse-events.h"
18

19
#include "util/header.h"
20
#include "util/event.h"
21
#include "util/evlist.h"
22
#include "util/evsel.h"
23
#include "util/debug.h"
24
#include "util/session.h"
25
#include "util/symbol.h"
26
#include "util/cpumap.h"
27
#include "util/thread_map.h"
28

29
#include <unistd.h>
30
#include <sched.h>
31
#include <sys/mman.h>
32

33 34 35 36 37
/* How to treat a pre-existing perf.data file (see -A/-f handling in cmd_record()). */
enum write_mode_t {
	WRITE_FORCE,	/* overwrite; the old file is rotated to <name>.old */
	WRITE_APPEND	/* append new data to the existing file */
};

38
struct perf_record_opts record_opts = {
39 40
	.target_pid	     = -1,
	.target_tid	     = -1,
41
	.mmap_pages	     = UINT_MAX,
42 43 44 45 46
	.user_freq	     = UINT_MAX,
	.user_interval	     = ULLONG_MAX,
	.freq		     = 1000,
	.sample_id_all_avail = true,
};
47

48 49
static unsigned int		page_size;
static int			output;
50
static const char		*output_name			= NULL;
51
static bool			group				=  false;
52
static int			realtime_prio			=      0;
53
static enum write_mode_t	write_mode			= WRITE_FORCE;
54
static bool			no_buildid			=  false;
55
static bool			no_buildid_cache		=  false;
56
static struct perf_evlist	*evsel_list;
57 58 59

static long			samples				=      0;
static u64			bytes_written			=      0;
60

61
static int			file_new			=      1;
62
static off_t			post_processing_offset;
63

64
static struct perf_session	*session;
65
static const char               *progname;
66

67 68 69 70 71
static void advance_output(size_t size)
{
	bytes_written += size;
}

72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
static void write_output(void *buf, size_t size)
{
	while (size) {
		int ret = write(output, buf, size);

		if (ret < 0)
			die("failed to write");

		size -= ret;
		buf += ret;

		bytes_written += ret;
	}
}

87
static int process_synthesized_event(union perf_event *event,
88
				     struct perf_sample *sample __used,
89
				     struct perf_session *self __used)
90
{
91
	write_output(event, event->header.size);
92 93 94
	return 0;
}

95
static void mmap_read(struct perf_mmap *md)
96
{
97
	unsigned int head = perf_mmap__read_head(md);
98 99 100 101 102
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;

103 104 105 106
	if (old == head)
		return;

	samples++;
107 108 109 110 111 112 113

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;
114

115
		write_output(buf, size);
116 117 118 119 120
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;
121

122
	write_output(buf, size);
123 124

	md->prev = old;
125
	perf_mmap__write_tail(md, old);
126 127 128
}

static volatile int done;		/* tells the record loop to wind down */
static volatile int signr = -1;		/* signal that caused the exit, -1 = none */
static volatile int child_finished;	/* set once SIGCHLD has been seen */

/*
 * Shared handler for SIGCHLD/SIGINT/SIGUSR1: remember which signal fired
 * and flag the main loop to stop.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

static void sig_atexit(void)
{
143 144
	int status;

145
	if (evsel_list->workload.pid > 0) {
146
		if (!child_finished)
147
			kill(evsel_list->workload.pid, SIGTERM);
148 149 150 151 152

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), progname);
	}
153

154
	if (signr == -1 || signr == SIGUSR1)
155 156 157 158
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
159 160
}

161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179
static bool perf_evlist__equal(struct perf_evlist *evlist,
			       struct perf_evlist *other)
{
	struct perf_evsel *pos, *pair;

	if (evlist->nr_entries != other->nr_entries)
		return false;

	pair = list_entry(other->entries.next, struct perf_evsel, node);

	list_for_each_entry(pos, &evlist->entries, node) {
		if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
			return false;
		pair = list_entry(pair->node.next, struct perf_evsel, node);
	}

	return true;
}

180 181
static void open_counters(struct perf_evlist *evlist)
{
182
	struct perf_evsel *pos, *first;
183

184 185
	first = list_entry(evlist->entries.next, struct perf_evsel, node);

186 187
	perf_evlist__config_attrs(evlist, &record_opts);

188 189
	list_for_each_entry(pos, &evlist->entries, node) {
		struct perf_event_attr *attr = &pos->attr;
190
		struct xyarray *group_fd = NULL;
191 192 193 194 195 196 197 198 199 200 201 202 203
		/*
		 * Check if parse_single_tracepoint_event has already asked for
		 * PERF_SAMPLE_TIME.
		 *
		 * XXX this is kludgy but short term fix for problems introduced by
		 * eac23d1c that broke 'perf script' by having different sample_types
		 * when using multiple tracepoint events when we use a perf binary
		 * that tries to use sample_id_all on an older kernel.
		 *
		 * We need to move counter creation to perf_session, support
		 * different sample_types, etc.
		 */
		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
204

205 206
		if (group && pos != first)
			group_fd = first->fd;
207
retry_sample_id:
208
		attr->sample_id_all = record_opts.sample_id_all_avail ? 1 : 0;
209
try_again:
210 211
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
				     group_fd) < 0) {
212 213
			int err = errno;

214
			if (err == EPERM || err == EACCES) {
215
				ui__error_paranoid();
216
				exit(EXIT_FAILURE);
217
			} else if (err ==  ENODEV && record_opts.cpu_list) {
218 219
				die("No such device - did you specify"
					" an out-of-range profile CPU?\n");
220
			} else if (err == EINVAL && record_opts.sample_id_all_avail) {
221 222 223
				/*
				 * Old kernel, no attr->sample_id_type_all field
				 */
224 225
				record_opts.sample_id_all_avail = false;
				if (!record_opts.sample_time && !record_opts.raw_samples && !time_needed)
226 227
					attr->sample_type &= ~PERF_SAMPLE_TIME;

228
				goto retry_sample_id;
229
			}
230

231 232 233 234 235 236 237 238 239
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
240 241
					ui__warning("The cycles event is not supported, "
						    "trying to fall back to cpu-clock-ticks\n");
242 243 244 245
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
246 247 248 249 250 251 252

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(pos));
				exit(EXIT_FAILURE);
			}

253
			printf("\n");
254
			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
255
			      err, strerror(err));
256 257

#if defined(__i386__) || defined(__x86_64__)
258 259 260 261 262
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
263 264
#endif

265
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
L
Li Zefan 已提交
266 267
		}
	}
268

269 270 271 272 273 274
	if (perf_evlist__set_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		exit(-1);
	}

275
	if (perf_evlist__mmap(evlist, record_opts.mmap_pages, false) < 0)
276 277
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));

278 279 280 281 282 283 284 285 286 287
	if (file_new)
		session->evlist = evlist;
	else {
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			exit(-1);
		}
 	}

	perf_session__update_sample_type(session);
288 289
}

290 291 292 293
static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

294 295 296
	if (size == 0)
		return 0;

297 298 299 300 301 302
	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

303 304
static void atexit_header(void)
{
305
	if (!record_opts.pipe_output) {
306
		session->header.data_size += bytes_written;
307

308 309
		if (!no_buildid)
			process_buildids();
310
		perf_session__write_header(session, evsel_list, output, true);
311
		perf_session__delete(session);
312
		perf_evlist__delete(evsel_list);
313
		symbol__exit();
314
	}
315 316
}

317
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
318 319
{
	int err;
320
	struct perf_session *psession = data;
321

322
	if (machine__is_host(machine))
323 324 325 326 327 328 329 330 331 332
		return;

	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
333 334
	err = perf_event__synthesize_modules(process_synthesized_event,
					     psession, machine);
335 336
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
337
		       " relocation symbol.\n", machine->pid);
338 339 340 341 342

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
343 344
	err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
						 psession, machine, "_text");
345
	if (err < 0)
346 347 348
		err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
							 psession, machine,
							 "_stext");
349 350
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
351
		       " relocation symbol.\n", machine->pid);
352 353
}

354 355 356 357 358 359 360
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static void mmap_read_all(void)
{
361
	int i;
362

363
	for (i = 0; i < evsel_list->nr_mmaps; i++) {
364 365
		if (evsel_list->mmap[i].base)
			mmap_read(&evsel_list->mmap[i]);
366 367 368 369 370 371
	}

	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
		write_output(&finished_round_event, sizeof(finished_round_event));
}

372
static int __cmd_record(int argc, const char **argv)
373
{
I
Ingo Molnar 已提交
374 375
	struct stat st;
	int flags;
376
	int err;
377
	unsigned long waking = 0;
378
	const bool forks = argc > 0;
379
	struct machine *machine;
380

381 382
	progname = argv[0];

383 384
	page_size = sysconf(_SC_PAGE_SIZE);

385 386 387
	atexit(sig_atexit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
388
	signal(SIGUSR1, sig_handler);
389

390 391
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
392
			record_opts.pipe_output = true;
393 394 395 396 397
		else
			output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
398
			record_opts.pipe_output = true;
399 400 401 402 403 404 405 406 407 408
		else if (!stat(output_name, &st) && st.st_size) {
			if (write_mode == WRITE_FORCE) {
				char oldname[PATH_MAX];
				snprintf(oldname, sizeof(oldname), "%s.old",
					 output_name);
				unlink(oldname);
				rename(output_name, oldname);
			}
		} else if (write_mode == WRITE_APPEND) {
			write_mode = WRITE_FORCE;
409
		}
410 411
	}

412
	flags = O_CREAT|O_RDWR;
413
	if (write_mode == WRITE_APPEND)
414
		file_new = 0;
I
Ingo Molnar 已提交
415 416 417
	else
		flags |= O_TRUNC;

418
	if (record_opts.pipe_output)
419 420 421
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
422 423 424 425 426
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}

427
	session = perf_session__new(output_name, O_WRONLY,
428
				    write_mode == WRITE_FORCE, false, NULL);
429
	if (session == NULL) {
430 431 432 433
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

434 435 436
	if (!no_buildid)
		perf_header__set_feat(&session->header, HEADER_BUILD_ID);

437
	if (!file_new) {
438
		err = perf_session__read_header(session, output);
439
		if (err < 0)
440
			goto out_delete_session;
441 442
	}

443
	if (have_tracepoints(&evsel_list->entries))
444
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
445

446 447 448 449 450 451 452 453 454 455 456 457 458
	perf_header__set_feat(&session->header, HEADER_HOSTNAME);
	perf_header__set_feat(&session->header, HEADER_OSRELEASE);
	perf_header__set_feat(&session->header, HEADER_ARCH);
	perf_header__set_feat(&session->header, HEADER_CPUDESC);
	perf_header__set_feat(&session->header, HEADER_NRCPUS);
	perf_header__set_feat(&session->header, HEADER_EVENT_DESC);
	perf_header__set_feat(&session->header, HEADER_CMDLINE);
	perf_header__set_feat(&session->header, HEADER_VERSION);
	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
	perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
	perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
	perf_header__set_feat(&session->header, HEADER_CPUID);

459
	if (forks) {
460 461 462 463
		err = perf_evlist__prepare_workload(evsel_list, &record_opts, argv);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
464 465 466
		}
	}

467
	open_counters(evsel_list);
468

469 470 471 472 473
	/*
	 * perf_session__delete(session) will be called at atexit_header()
	 */
	atexit(atexit_header);

474
	if (record_opts.pipe_output) {
475 476 477 478
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
	} else if (file_new) {
479 480
		err = perf_session__write_header(session, evsel_list,
						 output, false);
481 482
		if (err < 0)
			return err;
483 484
	}

485 486
	post_processing_offset = lseek(output, 0, SEEK_CUR);

487
	if (record_opts.pipe_output) {
488 489
		err = perf_session__synthesize_attrs(session,
						     process_synthesized_event);
490 491 492 493
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
494

495 496
		err = perf_event__synthesize_event_types(process_synthesized_event,
							 session);
497 498 499 500
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}
501

502
		if (have_tracepoints(&evsel_list->entries)) {
503 504 505 506 507 508 509 510
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
511 512 513
			err = perf_event__synthesize_tracing_data(output, evsel_list,
								  process_synthesized_event,
								  session);
514 515 516 517
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				return err;
			}
518
			advance_output(err);
519
		}
520 521
	}

522 523
	machine = perf_session__find_host_machine(session);
	if (!machine) {
524 525 526 527
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

528 529
	err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
						 session, machine, "_text");
530
	if (err < 0)
531 532
		err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
							 session, machine, "_stext");
533 534 535 536
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");
537

538 539
	err = perf_event__synthesize_modules(process_synthesized_event,
					     session, machine);
540 541 542 543 544
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

545
	if (perf_guest)
546 547
		perf_session__process_machines(session,
					       perf_event__synthesize_guest_os);
548

549
	if (!record_opts.system_wide)
550 551 552
		perf_event__synthesize_thread_map(evsel_list->threads,
						  process_synthesized_event,
						  session);
553
	else
554 555
		perf_event__synthesize_threads(process_synthesized_event,
					       session);
556

557 558 559 560 561
	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
562
			pr_err("Could not set realtime priority.\n");
563 564 565 566
			exit(-1);
		}
	}

567 568
	perf_evlist__enable(evsel_list);

569 570 571
	/*
	 * Let the child rip
	 */
572
	if (forks)
573
		perf_evlist__start_workload(evsel_list);
574

575
	for (;;) {
576
		int hits = samples;
577

578
		mmap_read_all();
579

580 581 582
		if (hits == samples) {
			if (done)
				break;
583
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
584 585 586
			waking++;
		}

587 588
		if (done)
			perf_evlist__disable(evsel_list);
589 590
	}

591
	if (quiet || signr == SIGUSR1)
592 593
		return 0;

594 595
	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

596 597 598 599
	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
600
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
601 602 603
		(double)bytes_written / 1024.0 / 1024.0,
		output_name,
		bytes_written / 24);
604

605
	return 0;
606 607 608 609

out_delete_session:
	perf_session__delete(session);
	return err;
610
}
/* Usage lines shown by usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

618 619
static bool force, append_file;

620
const struct option record_options[] = {
621
	OPT_CALLBACK('e', "event", &evsel_list, "event",
622
		     "event selector. use 'perf list' to list available events",
623
		     parse_events_option),
624
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
L
Li Zefan 已提交
625
		     "event filter", parse_filter),
626
	OPT_INTEGER('p', "pid", &record_opts.target_pid,
627
		    "record events on existing process id"),
628
	OPT_INTEGER('t', "tid", &record_opts.target_tid,
629
		    "record events on existing thread id"),
630 631
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
632
	OPT_BOOLEAN('D', "no-delay", &record_opts.no_delay,
633
		    "collect data without buffering"),
634
	OPT_BOOLEAN('R', "raw-samples", &record_opts.raw_samples,
635
		    "collect raw sample records from all opened counters"),
636
	OPT_BOOLEAN('a', "all-cpus", &record_opts.system_wide,
637
			    "system-wide collection from all CPUs"),
I
Ingo Molnar 已提交
638 639
	OPT_BOOLEAN('A', "append", &append_file,
			    "append to the output file to do incremental profiling"),
640
	OPT_STRING('C', "cpu", &record_opts.cpu_list, "cpu",
641
		    "list of cpus to monitor"),
642
	OPT_BOOLEAN('f', "force", &force,
643
			"overwrite existing data file (deprecated)"),
644
	OPT_U64('c', "count", &record_opts.user_interval, "event period to sample"),
I
Ingo Molnar 已提交
645 646
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
647
	OPT_BOOLEAN('i', "no-inherit", &record_opts.no_inherit,
648
		    "child tasks do not inherit counters"),
649
	OPT_UINTEGER('F', "freq", &record_opts.user_freq, "profile at this frequency"),
650 651
	OPT_UINTEGER('m', "mmap-pages", &record_opts.mmap_pages,
		     "number of mmap data pages"),
652 653
	OPT_BOOLEAN(0, "group", &group,
		    "put the counters into a counter group"),
654
	OPT_BOOLEAN('g', "call-graph", &record_opts.call_graph,
655
		    "do call-graph (stack chain/backtrace) recording"),
656
	OPT_INCR('v', "verbose", &verbose,
657
		    "be more verbose (show counter open errors, etc)"),
658
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
659
	OPT_BOOLEAN('s', "stat", &record_opts.inherit_stat,
660
		    "per thread counts"),
661
	OPT_BOOLEAN('d', "data", &record_opts.sample_address,
662
		    "Sample addresses"),
663 664
	OPT_BOOLEAN('T', "timestamp", &record_opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('n', "no-samples", &record_opts.no_samples,
665
		    "don't sample"),
666
	OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
667
		    "do not update the buildid cache"),
668 669
	OPT_BOOLEAN('B', "no-buildid", &no_buildid,
		    "do not collect buildids in perf.data"),
S
Stephane Eranian 已提交
670 671 672
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
673 674 675
	OPT_END()
};

676
int cmd_record(int argc, const char **argv, const char *prefix __used)
677
{
678 679
	int err = -ENOMEM;
	struct perf_evsel *pos;
680

681 682
	perf_header__set_cmdline(argc, argv);

683
	evsel_list = perf_evlist__new(NULL, NULL);
684 685 686
	if (evsel_list == NULL)
		return -ENOMEM;

687
	argc = parse_options(argc, argv, record_options, record_usage,
688
			    PARSE_OPT_STOP_AT_NON_OPTION);
689 690
	if (!argc && record_opts.target_pid == -1 && record_opts.target_tid == -1 &&
		!record_opts.system_wide && !record_opts.cpu_list)
691
		usage_with_options(record_usage, record_options);
692

693 694 695
	if (force && append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
				" You need to choose between -f and -A");
696
		usage_with_options(record_usage, record_options);
697 698 699 700 701 702
	} else if (append_file) {
		write_mode = WRITE_APPEND;
	} else {
		write_mode = WRITE_FORCE;
	}

703
	if (nr_cgroups && !record_opts.system_wide) {
S
Stephane Eranian 已提交
704 705 706 707 708
		fprintf(stderr, "cgroup monitoring only available in"
			" system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

709
	symbol__init();
710

711
	if (symbol_conf.kptr_restrict)
712 713 714 715 716 717 718 719
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
720

721
	if (no_buildid_cache || no_buildid)
722
		disable_buildid_cache();
723

724 725
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
726 727
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
728
	}
729

730 731
	if (record_opts.target_pid != -1)
		record_opts.target_tid = record_opts.target_pid;
732

733 734
	if (perf_evlist__create_maps(evsel_list, record_opts.target_pid,
				     record_opts.target_tid, record_opts.cpu_list) < 0)
735
		usage_with_options(record_usage, record_options);
736

737
	list_for_each_entry(pos, &evsel_list->entries, node) {
738 739
		if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
					 evsel_list->threads->nr) < 0)
740
			goto out_free_fd;
741 742
		if (perf_header__push_event(pos->attr.config, event_name(pos)))
			goto out_free_fd;
743
	}
744

745
	if (perf_evlist__alloc_pollfd(evsel_list) < 0)
746
		goto out_free_fd;
747

748 749 750 751
	if (record_opts.user_interval != ULLONG_MAX)
		record_opts.default_interval = record_opts.user_interval;
	if (record_opts.user_freq != UINT_MAX)
		record_opts.freq = record_opts.user_freq;
752

753 754 755
	/*
	 * User specified count overrides default frequency.
	 */
756 757 758 759
	if (record_opts.default_interval)
		record_opts.freq = 0;
	else if (record_opts.freq) {
		record_opts.default_interval = record_opts.freq;
760 761
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
762
		err = -EINVAL;
763
		goto out_free_fd;
764 765
	}

766 767
	err = __cmd_record(argc, argv);
out_free_fd:
768
	perf_evlist__delete_maps(evsel_list);
769 770
out_symbol_exit:
	symbol__exit();
771
	return err;
772
}