#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

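/*
 * Open the file named in self->filename, or read from stdin when the name
 * is "-" (pipe mode).  For regular files, sanity-check ownership and size
 * before reading the perf header to verify the format is compatible.
 */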
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

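/*
 * Allocate and set up a session.  In O_RDONLY mode the data file is opened
 * and its header read here; in O_WRONLY mode the kernel maps are created up
 * front instead (when reading, that happens while processing the kernel
 * MMAP event).
 *
 * Minimal usage sketch (hypothetical caller, error handling trimmed;
 * my_process_sample is an assumed tool-side handler, not part of this file):
 *
 *	static struct perf_event_ops my_ops = {
 *		.sample		 = my_process_sample,
 *		.ordered_samples = true,
 *	};
 *
 *	struct perf_session *session =
 *		perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session != NULL) {
 *		perf_session__process_events(session, &my_ops);
 *		perf_session__delete(session);
 *	}
 */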
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move it to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

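/*
 * Resolve each entry in a sampled callchain to a map/symbol pair.
 * PERF_CONTEXT_* markers embedded in the chain don't resolve to symbols;
 * they switch the cpumode used for the entries that follow.  Returns a
 * calloc'ed array of chain->nr elements that the caller must free, or NULL
 * on allocation failure.
 */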
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

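/*
 * Point every callback the tool left unset at a stub, so that the event
 * dispatcher never has to check for NULL.
 */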
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

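/*
 * Byte-swap a buffer of u64s in place; byte_size is expected to be a
 * multiple of sizeof(u64).
 */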
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

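	/* the attr event tail is a variable-length array of u64 ids */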
	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]   = NULL,
	[PERF_RECORD_HEADER_MAX]    = NULL,
};

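/*
 * Ordered-samples machinery: sample events are queued sorted by timestamp
 * and only delivered to ops->sample() once a flush point guarantees that no
 * event with an earlier timestamp can still arrive (see the comment above
 * process_finished_round() below).
 */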
struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

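/*
 * Insertion helpers for the time-ordered queue: scan backwards from the
 * tail, or continue backwards/forwards from a known neighbour, until a
 * suitably ordered spot is found.
 */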
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s);

	return 0;
}

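/*
 * Central dispatcher: account the event in the session stats, byte-swap it
 * if the file was recorded on a machine of opposite endianness, then route
 * it to the matching ops callback.
 */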
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

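/*
 * Read exactly size bytes, retrying on short reads.  Returns size on
 * success, or the failing read() return value (0 on end of file, negative
 * on error).
 */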
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

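/*
 * mmap the data file a window of mmap_window pages at a time and walk the
 * events it contains, sliding the window forward (page aligned) whenever
 * the next event would cross its end.
 */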
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t	page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
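	/* mmap offsets must be page aligned, so carry the remainder in head */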
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

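/*
 * Stash a reference symbol (e.g. "_text" as resolved from kallsyms) and its
 * address in every kernel map, so the actual kernel relocation offset can
 * be computed later, when the kernel DSO is loaded.
 */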
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}