#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"

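/*
 * Parse a raw record into a struct perf_sample using the attributes of
 * the first evsel in the session's list. All evsels are required to
 * share the same sample layout (checked at open time), so the first one
 * describes every record in the file.
 */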
int perf_session__parse_sample(struct perf_session *session,
			       const union perf_event *event,
			       struct perf_sample *sample)
{
	struct perf_evsel *first;
	first = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	return perf_event__parse_sample(event, first->attr.sample_type,
					first->sample_size,
					first->attr.sample_id_all, sample,
					session->header.needs_swap);
}

int perf_session__synthesize_sample(struct perf_session *session,
				    union perf_event *event,
				    const struct perf_sample *sample)
{
	return perf_event__synthesize_sample(event, perf_evlist__sample_type(session->evlist),
					     sample, session->header.needs_swap);
}

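/*
 * Open the session's input: stdin when the file name is "-" (pipe mode),
 * otherwise an on-disk perf.data file, whose header is read and whose
 * sample layout is validated across all evsels.
 */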
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	session->host_machine.id_hdr_size = id_hdr_size;
	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

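/*
 * Allocate and initialize a session. With a NULL or empty file name we
 * fall back to stdin if it is a FIFO, else to "perf.data". O_RDONLY
 * opens and validates an existing file; O_WRONLY only creates the
 * kernel maps for a session that is going to be written.
 *
 * Sketch of typical read-side usage (error handling elided;
 * process_sample_event is a caller-supplied callback):
 *
 *	struct perf_tool tool = { .sample = process_sample_event };
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &tool);
 *	if (session != NULL) {
 *		perf_session__process_events(session, &tool);
 *		perf_session__delete(session);
 *	}
 */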
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try each cpumode in turn until we find a
		 * match, or else the symbol remains unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
				ip, &al, NULL);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

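/*
 * Walk a raw ip_callchain and append the resolved entries to the global
 * callchain cursor. PERF_CONTEXT_* markers embedded in the stack switch
 * the cpumode used to resolve the addresses that follow them.
 */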
int machine__resolve_callchain(struct machine *self,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

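/*
 * Give every callback the tool left NULL a default, mostly do-nothing
 * stubs, so the dispatch code below can invoke them unconditionally.
 */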
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
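/*
 * Byte-swap a buffer in place as an array of 32-bit (below, 64-bit)
 * words, for data files recorded on a host of the opposite endianness.
 */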
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __used)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though
 * this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __used)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __used)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __used)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

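/*
 * Deliver, in timestamp order, every queued event up to the current
 * flush limit (os->next_flush), recycling the queue nodes onto the
 * sample_cache free list as they are consumed.
 */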
static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

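/*
 * Queue an event for time-ordered delivery. Queue nodes are taken from
 * the sample_cache free list when possible; otherwise they come from
 * 64KB slabs that are kept on the to_free list and released only when
 * the session is torn down.
 */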
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = perf_evlist__sample_type(session->evlist);

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}

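/*
 * Events recorded in guest context are attributed to the guest machine
 * keyed by pid; everything else belongs to the host machine.
 */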
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}

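/*
 * Dispatch one kernel-generated event to the matching tool callback,
 * after accounting it per evsel and resolving which machine (host or
 * guest) it belongs to.
 */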
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

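/*
 * Main per-event entry point: byte-swap if the file and host disagree on
 * endianness, route synthetic user events directly, parse the sample
 * data of kernel events, then either queue them for time-ordered
 * delivery or deliver them immediately.
 */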
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

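/*
 * Event loop for pipe input: the fixed-size header of each record is
 * read first, so the buffer can be grown to the advertised size before
 * the payload is read.
 */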
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

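/*
 * mmap-based event loop for on-disk files: map a window of the file (the
 * whole file on 64-bit), process events until one would cross the end of
 * the window, then slide the window forward and continue.
 */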
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

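/*
 * Record a reference symbol (e.g. "_stext") and its kallsyms address in
 * the kmap of every map type, so that kernel symbols can be relocated
 * later if the running kernel is not the one that was recorded.
 */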
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong.  Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, int print_sym,
			  int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, al.thread,
						sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

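/*
 * Translate a user-supplied cpu list string into a bitmap, after
 * verifying that the recorded events actually carry PERF_SAMPLE_CPU.
 * Sketch of typical usage:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap) < 0)
 *		return -1;
 */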
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

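/*
 * Associate handler functions with the tracepoint evsels named in
 * assocs[]; tracepoints not present in the session are silently skipped.
 * Sketch of typical usage (process_sched_switch is a caller-supplied
 * handler):
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *	};
 *
 *	__perf_session__set_tracepoints_handlers(session, handlers,
 *						 ARRAY_SIZE(handlers));
 */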
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}