#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

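/*
 * Open self->filename ("-" means read from stdin), sanity-check its
 * ownership and size, and read in the perf.data header.  Returns 0 on
 * success or a negative value on error.
 */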
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

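/*
 * Allocate and set up a session for 'filename' ("-" selects pipe mode
 * on stdin).  A minimal usage sketch (error handling elided, 'ops'
 * supplied by the caller):
 *
 *	struct perf_session *s = perf_session__new("perf.data", O_RDONLY,
 *						   false, false);
 *	if (s != NULL) {
 *		perf_session__process_events(s, &ops);
 *		perf_session__delete(s);
 *	}
 */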
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

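/*
 * Resolve each entry of a raw ip_callchain to a map/symbol pair.
 * PERF_CONTEXT_* markers embedded in the chain switch the cpumode used
 * to look up the addresses that follow them.  Returns a calloc'ed array
 * of chain->nr entries that the caller must free, or NULL when the
 * allocation fails.
 */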
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

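/*
 * Point every callback the tool left NULL at a do-nothing stub so the
 * dispatch code never has to check for missing handlers.
 */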
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

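	/*
	 * A variable-length array of u64 ids follows the attr struct;
	 * swap whatever part of the event remains after it.
	 */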
	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

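/*
 * Byte-swap handlers indexed by event type, used when the data file was
 * recorded on a host of the opposite endianness.  A NULL entry means
 * only the common event header gets swapped.
 */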
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]   = NULL,
	[PERF_RECORD_HEADER_MAX]    = NULL,
};

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

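/*
 * Deliver every queued sample with a timestamp up to
 * ordered_samples.next_flush to the ->sample handler, oldest first,
 * then free it.
 */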
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over all of its buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp very close
	 * to the one of the last inserted event, unless we just switched
	 * to another event buffer.  Sorting a list around the last
	 * inserted event, which is usually close to the current one, is
	 * probably cheaper than an rbtree-based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

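/*
 * Take a private copy of the event and insert it into the time-ordered
 * queue, remembering it as the starting hint for the next insertion.
 */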
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

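/*
 * With ordered delivery, parse out the sample's timestamp and queue the
 * event; flush_sample_queue() hands it to ->sample later, in time
 * order.  Otherwise deliver it to ->sample right away.
 */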
static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}

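/*
 * Central dispatcher: account the event in the stats, byte-swap it if
 * the file was recorded with the opposite endianness, then hand it to
 * the perf_event_ops callback matching its type.
 */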
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

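/*
 * Loop over read() until exactly 'size' bytes have arrived.  Returns
 * 'size' on success, 0 if the stream ended early, or read()'s negative
 * return value on error.
 */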
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

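/*
 * Pipe mode: the input can be neither mmapped nor rewound, so read one
 * event at a time from self->fd and dispatch it, until EOF or until
 * session_done() is set.
 */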
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

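/*
 * Map the data file in windows of self->mmap_window pages and process
 * the events in place, sliding the window forward (the remap label)
 * whenever the next event would cross its end.
 */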
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

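/*
 * Record a reference symbol and its expected address so that the kernel
 * maps can later be relocated against the running kernel's kallsyms.
 * The same ref_reloc_sym is shared by all map types.
 */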
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}