#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"
#include "perf_regs.h"
#include "unwind.h"
#include "vdso.h"

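/*
 * Open the file named in self->filename ("-" means reading a pipe on
 * stdin), read the perf.data header and validate that the recorded
 * events have a usable, consistent sample layout.
 */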
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	session->host_machine.id_hdr_size = id_hdr_size;
	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

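/*
 * Create a session for reading or writing a perf.data file. Typical read
 * side usage, roughly how the perf builtins drive it (a sketch; error
 * handling elided):
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &tool);
 *	if (session == NULL)
 *		return -1;
 *	perf_session__process_events(session, &tool);
 *	perf_session__delete(session);
 */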
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->hostname);
	free(env->os_release);
	free(env->version);
	free(env->arch);
	free(env->cpu_desc);
	free(env->cpuid);

	free(env->cmdline);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
	vdso__exit();
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries,
		 * so we have to try each mode in turn; if none matches, the
		 * symbol stays unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
				ip, &al, NULL);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent)
{
	int ret;

	callchain_cursor_reset(&callchain_cursor);

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

401 402 403 404 405
	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

406 407 408 409 410 411
	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, evsel->attr.sample_regs_user,
				   sample);
}

static int process_event_synth_tracing_data_stub(union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
 
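/*
 * mem_bswap_32/64: byte swap a buffer of 32/64-bit words in place;
 * byte_size is expected to be a multiple of the word size.
 */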
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

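/*
 * Swap the sample_id_all trailer that follows the event payload; it is
 * laid out as u64 sized fields, so it can be swapped as a block of u64s.
 */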
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
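	/* e.g. revbyte(0xb1): 1011 0001 -> 1000 1101 == 0x8d */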
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

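/*
 * Deliver every queued event with a timestamp at or below os->next_flush,
 * in timestamp order.
 */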
static int flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

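/*
 * Queue one event for time ordered delivery. Events without a usable
 * timestamp are rejected with -ETIME and the caller falls back to
 * delivering them directly.
 */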
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

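/*
 * Central dispatch for one event: byte swap it if the file endianness
 * differs from the host, route synthesized user events directly and
 * queue or deliver kernel events depending on tool->ordered_samples.
 */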
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

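/*
 * Walk the on-disk data in mmap_size sized windows: events are processed
 * from the current window and the window slides forward whenever the next
 * event would cross its end.
 */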
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

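/*
 * Remember the kallsyms reference/relocation symbol (typically "_text")
 * on every map type so the kernel maps can later be relocated against
 * its address.
 */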
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong.  Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

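/*
 * Check that the session's events carry CPU information, then set a bit
 * in cpu_bitmap for every CPU named in cpu_list (e.g. "0-2,5").
 */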
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}