#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

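/*
 * Open the session's data file and read its header. The special filename
 * "-" means "read a pipe from stdin". Regular files must be owned by the
 * current user or root (unless force is set) and must be non-empty.
 */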
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

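/* Create kernel maps for the host machine, then for any guest machines. */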
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self);
}
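
/*
 * Illustrative sketch (not code from this file): a typical reader would
 * do something like
 *
 *	struct perf_session *session;
 *	int err;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &my_event_ops);
 *	perf_session__delete(session);
 *
 * where my_event_ops is a hypothetical caller-defined struct
 * perf_event_ops; callbacks left NULL in it are filled with stubs by
 * perf_event_ops__fill_defaults().
 */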

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

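/*
 * Resolve each ip in a callchain to a map/symbol pair. PERF_CONTEXT_*
 * markers embedded in the chain switch the cpumode used for the lookup;
 * the caller owns (and must free) the returned array of chain->nr entries.
 */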
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

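/*
 * Fill every callback the tool left NULL with a stub, so that event
 * dispatch never has to check for missing handlers.
 */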
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

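/*
 * Byte-swap an array of u64s in place, for data files recorded on a
 * machine of the opposite endianness. byte_size must be a multiple of
 * sizeof(u64), or the loop will swap past the end of the buffer.
 */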
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

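/*
 * Per-record-type byte-swap handlers, indexed by perf_event_header.type.
 * A NULL entry means there is no payload swap handler for that type.
 */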
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]   = NULL,
	[PERF_RECORD_HEADER_MAX]    = NULL,
};

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

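/*
 * Deliver every queued sample with a timestamp up to next_flush to
 * ops->sample(), in timestamp order, then free it.
 */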
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over all buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. So sorting with a list, starting from
	 * the last inserted event, is probably more efficient than an
	 * rbtree-based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

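/*
 * Copy the event and insert it into the time-ordered queue. Events older
 * than the last flushed timeslice cannot be sorted anymore and are
 * rejected with -EINVAL.
 */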
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}

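/*
 * Central dispatcher: account the event, byte-swap it if the file comes
 * from a machine of the opposite endianness, then route it to the
 * matching ops callback.
 */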
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

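/*
 * Read exactly @size bytes, looping over short reads. Returns @size on
 * success, 0 on end of file and a negative value on error; note that a
 * partial read followed by EOF is reported as EOF.
 */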
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

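/*
 * Pipe input cannot be mmapped or lseek()'d, so read one event at a
 * time: first the header (byte-swapping it if needed), then the payload.
 */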
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again
		 * 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

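/*
 * Process events from a regular file by mmapping it one mmap_window worth
 * of pages at a time, sliding the window forward whenever an event would
 * cross its end. When the header says the file needs byte-swapping, the
 * mapping is made PROT_WRITE and MAP_PRIVATE, since swapping modifies the
 * buffer in place.
 */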
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again
		 * 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

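/*
 * Record @symbol_name/@addr as the relocation reference symbol in the
 * kmap of each map type, so kernel maps can later be relocated against
 * actual kallsyms addresses. The copied name is truncated at the first
 * ']', if any.
 */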
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}