#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "vdso.h"

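/*
 * Open the perf.data file (or stdin when reading from a pipe), verify
 * ownership and size, read the header and check that the evsels carry
 * consistent sample_type, sample_id_all and read_format settings.
 */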
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	if (!perf_evlist__valid_read_format(self->evlist)) {
		pr_err("non matching read_format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machines__destroy_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machines__init(&self->machines);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->hostname);
	free(env->os_release);
	free(env->version);
	free(env->arch);
	free(env->cpu_desc);
	free(env->cpuid);

	free(env->cmdline);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machines__exit(&self->machines);
	close(self->fd);
	free(self);
	vdso__exit();
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

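/* Plug in stub handlers for any callbacks the tool did not provide. */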
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
 
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix, carrying the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

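/*
 * Deliver, in timestamp order, all queued events with a timestamp up to
 * ordered_samples.next_flush, recycling their sample_queue entries onto
 * the sample_cache list.
 */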
static int flush_sample_queue(struct perf_session *s,
		       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	bool show_progress = limit == ULLONG_MAX;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (show_progress && (++idx >= progress_next)) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

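/*
 * Insert an event into the time ordered queue. Entries are carved out of
 * sample_buffer chunks of MAX_SAMPLE_BUFFER entries and reused via the
 * sample_cache list; events without a timestamp are rejected with -ETIME.
 */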
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		return perf_session__findnew_machine(session, pid);
	}

	return &session->machines.host;
}

static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union  perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}

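/*
 * Resolve the evsel and the machine for this event and dispatch it to the
 * tool callback matching its header type.
 */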
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, 0, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

volatile int session_done;

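/*
 * Pipe input cannot be mmapped, so read one event header at a time and
 * grow the read buffer with realloc() whenever an event is larger than
 * what has been allocated so far.
 */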
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(self, tool);
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

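/*
 * mmap() the data file in MMAP_SIZE windows and walk the events; when an
 * event crosses the end of the current window, remap starting at the page
 * containing it and continue from there.
 */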
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &session->evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

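/*
 * Remember the kallsyms reference symbol (name and address) on the kmap of
 * every map type so kernel relocation can be detected later.
 */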
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

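/*
 * Print a sample's IP, symbol and DSO according to print_opts, walking up
 * to stack_depth callchain entries when callchains are being used.
 */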
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct addr_location al;
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	char s = print_oneline ? ' ' : '\t';

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (stack_depth) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					al.addr = node->ip;
					al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (!print_oneline)
				printf("\n");

			callchain_cursor_advance(&callchain_cursor);

			stack_depth--;
		}

	} else {
		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

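/*
 * Parse cpu_list and set the corresponding bits in cpu_bitmap, after
 * checking that every event type in the session sampled the CPU.
 */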
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
1689
			goto out;
1690 1691 1692 1693 1694 1695 1696
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}