#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"
#include "perf_regs.h"
#include "vdso.h"

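/*
 * Open the perf.data file (or stdin when the filename is "-"), check
 * ownership and size, and read and validate the file header.
 */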
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machines__destroy_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machines__init(&self->machines);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->hostname);
	free(env->os_release);
	free(env->version);
	free(env->arch);
	free(env->cpu_desc);
	free(env->cpuid);

	free(env->cmdline);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machines__exit(&self->machines);
	close(self->fd);
	free(self);
	vdso__exit();
}

static int process_event_synth_tracing_data_stub(union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
 
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

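/*
 * Deliver every queued event with a timestamp up to os->next_flush, in
 * timestamp order, recycling delivered sample_queue entries onto the
 * sample_cache list.
 */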
static int flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

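/*
 * Queue one event for time-ordered processing: take a sample_queue entry
 * from the cache (or carve a new one out of a freshly allocated 64KB array
 * of entries) and insert it into the ordered list via __queue_event().
 */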
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
}

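/*
 * Guest samples are attributed to the machine identified by the event's
 * pid; everything else goes to the host machine.
 */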
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return &session->machines.host;
}

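/*
 * Hand a single, already parsed event to the tool callback matching its
 * type, after resolving the evsel and the target machine.
 */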
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->stats.nr_invalid_chains;
		session->stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

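/*
 * Top-level per-event path: byte-swap if needed, account the event type,
 * dispatch user (synthesized) events directly, otherwise parse the sample
 * and either queue the event for time ordering or deliver it immediately.
 */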
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

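/*
 * Pipe mode: read one event header at a time from the fd, grow the buffer
 * to the event size when necessary, and process events until EOF or until
 * session_done() is set.
 */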
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

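/*
 * File mode: mmap the data area in page-aligned windows (one big mapping on
 * 64-bit, 32MB slices otherwise), process the events in each window, remap
 * further into the file until file_pos reaches file_size, then do a final
 * flush of the ordered-sample queue.
 */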
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

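/*
 * Resolve a sample to symbols/DSOs and print either the resolved callchain
 * or just the sample IP, honouring the print_sym/print_dso/print_symoffset
 * flags.
 */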
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

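/*
 * Build a bitmap of the CPUs named in cpu_list, refusing to proceed if the
 * recorded events do not carry PERF_SAMPLE_CPU or if a CPU number exceeds
 * MAX_NR_CPUS.
 */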
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}