#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

5
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

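	/* A filename of "-" means the event stream is read from stdin. */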
	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

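/*
 * Create kernel maps for the host machine and for any guest machines,
 * rooted at self->machines.
 */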
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret;
	struct rb_root *root = &self->machines;

	ret = map_groups__create_kernel_maps(root, HOST_KERNEL_ID);
	if (ret >= 0)
		ret = map_groups__create_guest_kernel_maps(root);
	return ret;
}

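/*
 * Allocate and initialize a session. O_RDONLY opens filename (or stdin
 * for "-") and reads its header; O_WRONLY creates the kernel maps up
 * front instead. Returns NULL on failure.
 */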
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	self->machines = RB_ROOT;
	self->ordered_samples.flush_limit = ULLONG_MAX;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

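/*
 * Resolve every address in @chain to a map/symbol pair. Entries at or
 * above PERF_CONTEXT_MAX are not addresses but markers that switch the
 * cpumode (hypervisor/kernel/user) for the entries that follow. Returns
 * a calloc()'d array of chain->nr entries owned by the caller.
 */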
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
}

static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]	 = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]	 = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]	 = "BUILD_ID",
};

unsigned long event__total[PERF_RECORD_HEADER_MAX];

void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
	}
}

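/*
 * Byteswap helpers for perf.data files recorded on a machine of the
 * opposite endianness. mem_bswap_64() swaps a buffer in place, eight
 * bytes at a time.
 */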
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

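/*
 * PERF_RECORD_HEADER_ATTR records carry a perf_event_attr followed by a
 * variable-length array of u64 ids; its length is whatever remains of
 * header.size once the fixed part before attr.id is subtracted.
 */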
static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

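/*
 * Ordered-samples support: when ops->ordered_samples is set, sample
 * events are buffered on a time-sorted list and only delivered once a
 * whole FLUSH_PERIOD timeslice is known to be complete, so samples from
 * differently ordered event buffers come out in timestamp order.
 */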
struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

#define FLUSH_PERIOD	(2 * NSEC_PER_SEC)

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.flush_limit;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;


	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s,
			      struct perf_event_ops *ops)
{
	u64 timestamp = data->time;
	struct sample_queue *new;
	u64 flush_limit;


	if (s->ordered_samples.flush_limit == ULLONG_MAX)
		s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	/*
	 * We want to have a slice of events covering 2 * FLUSH_PERIOD.
	 * If FLUSH_PERIOD is big enough, it ensures every event that
	 * occurred in the first half of the timeslice has been buffered
	 * and none remain (we need that because of the weakly ordered
	 * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
	 * timeslice, we flush the first half to be gentle with the memory
	 * (the second half can still get new events in the middle, so wait
	 * another period to flush it).
	 */
	flush_limit = s->ordered_samples.flush_limit;

	if (new->timestamp > flush_limit &&
		new->timestamp - flush_limit > FLUSH_PERIOD) {
		s->ordered_samples.flush_limit += FLUSH_PERIOD;
		flush_sample_queue(s, ops);
	}

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s, ops);
}

static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

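/*
 * Read exactly @size bytes from @fd, looping over short reads. Returns
 * @size on success, or the (<= 0) result of the failing read().
 */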
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;
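
/*
 * Pipe-mode event loop: the stream is not seekable, so each record is
 * read in two steps (header, then payload) and dispatched immediately.
 * session_done is expected to be set asynchronously, e.g. by a signal
 * handler in the calling tool.
 */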

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0) {
			pr_err("unexpected end of event stream\n");
			goto done;
		}

		pr_err("failed to read event data\n");
		goto out_err;
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

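/*
 * Process a seekable perf.data file by mmap()ing a sliding window of
 * self->mmap_window pages over it; when a record would cross the end of
 * the window, the window is shifted and remapped. If the file needs
 * byteswapping, the mapping is private and writable so the in-place
 * swaps never reach the file.
 */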
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.flush_limit = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}
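
/*
 * Typical usage, sketched after the way the perf tools drive a session
 * (process_sample_event here is a hypothetical tool-side callback):
 *
 *	static struct perf_event_ops ops = {
 *		.sample		 = process_sample_event,
 *		.comm		 = event__process_comm,
 *		.ordered_samples = true,
 *	};
 *
 *	struct perf_session *session = perf_session__new("perf.data",
 *							 O_RDONLY, false);
 *	if (session == NULL)
 *		return -1;
 *	err = perf_session__process_events(session, &ops);
 *	perf_session__delete(session);
 */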

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

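/*
 * Attach @symbol_name/@addr as the reference relocation symbol to every
 * kernel map in @maps, so symbol addresses can later be adjusted for a
 * relocated kernel; the copied name is truncated at the first ']', if any.
 */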
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}