#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

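/*
 * Open the file named in self->filename, or read from stdin when the name
 * is "-" (pipe mode), and validate the perf.data header.  Unless 'force'
 * is given, files owned by neither root nor the current user are refused.
 */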
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;	/* return the saved value: pr_err() may clobber errno */
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

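/* Cache the sample_type bits from the header for the event parsers. */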
void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

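/*
 * Create the kernel maps for the host machine and, if that succeeds, for
 * any guest machines recorded in the session.
 */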
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

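/*
 * Allocate and initialize a session.  The filename is copied into the
 * flexible array at the end of the struct.  O_RDONLY opens and validates
 * an existing file; O_WRONLY sets up a session for recording, creating the
 * kernel maps up front instead of from the kernel MMAP event.
 */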
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

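/*
 * Typical read-side lifecycle, as a sketch (error handling elided;
 * 'my_event_ops' stands for the tool's own handler table):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &my_event_ops);
 *	perf_session__delete(session);
 */
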
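/*
 * Threads taken out of the rb tree are parked on the dead_threads list
 * (see perf_session__remove_thread()); both the tree and the list are
 * emptied at session teardown.
 */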
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);	/* advance before erasing: rb_next() must not see a stale node */
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

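/*
 * Resolve each ip in the callchain to a map/symbol pair.  Entries at or
 * above PERF_CONTEXT_MAX are not addresses but markers switching the
 * cpumode used for the entries that follow.  The caller owns the returned
 * array.
 */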
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

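/*
 * Point every handler the tool left NULL at a stub, so the dispatch switch
 * in perf_session__process_event() can call them unconditionally.
 */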
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

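/*
 * Byteswap helpers for perf.data files recorded on a machine of the
 * opposite endianness (self->header.needs_swap).  Fixed-width fields are
 * swapped one by one; trailing u64 arrays, like the ids after a
 * HEADER_ATTR event, are swapped in bulk with mem_bswap_64().
 */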
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

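/*
 * Deliver every queued sample with a timestamp at or below next_flush to
 * the tool's sample handler, in timestamp order, then free it.
 */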
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

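/*
 * Insertion helpers for the time-ordered queue: scan from the tail, or
 * backward/forward from the last insertion point, which is usually close
 * to where the new event belongs.
 */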
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}

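/*
 * Central dispatcher: account the event, byteswap it if the file comes
 * from a machine of the opposite endianness, then hand it to the matching
 * handler in 'ops'.
 */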
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	/* don't index the swap table with types it doesn't cover */
	if (self->header.needs_swap && event->header.type < PERF_RECORD_HEADER_MAX &&
	    event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

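/*
 * Read exactly 'size' bytes, looping over short reads.  Returns 'size' on
 * success, 0 on end of file, negative on error; bytes consumed before an
 * early EOF or error are not reported back.
 */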
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

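/*
 * Process an event stream arriving over a pipe (perf record -o -): the
 * input cannot be mmapped or rewound, so events are pulled into a stack
 * buffer with do_read().
 */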
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

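/*
 * Process events from an on-disk file by sliding a window of mmap_window
 * pages across it: whenever the next event would cross the end of the
 * window, it is re-mapped further into the file.  'offset' is the file
 * offset of the current mapping, 'head' the position inside it.
 */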
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

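/*
 * Record a reference symbol (typically "_text" or "_stext" from kallsyms)
 * and its address in the kmap of every map type, so that kernel relocation
 * can be detected later.  Anything from the first ']' onward is stripped
 * from the copied name.
 */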
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}