#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"

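/*
 * Sanity-check a file before processing: the header must parse, and for
 * non-pipe input all evsels must agree on sample_type, sample_id_all and
 * read_format, since later event parsing assumes one consistent layout.
 */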
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

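/*
 * Typical read-mode usage, sketched (error handling elided):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new(file, false, &tool);
 *	if (session == NULL)
 *		return -1;
 *	perf_session__process_events(session, &tool);
 *	perf_session__delete(session);
 */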
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	INIT_LIST_HEAD(&session->ordered_events.events);
	INIT_LIST_HEAD(&session->ordered_events.cache);
	INIT_LIST_HEAD(&session->ordered_events.to_free);
	session->ordered_events.max_alloc_size = (u64) -1;
	session->ordered_events.cur_alloc_size = 0;
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_dead_threads(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
 
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry the
 * perf_event_attr bitfield flags in a separate data file FEAT_ section.
 * Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

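/*
 * One entry in the time-ordered queue: the timestamp it is sorted by,
 * the raw event, and the file offset the event was read from.
 */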
struct ordered_event {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

enum oe_flush {
	OE_FLUSH__FINAL,
	OE_FLUSH__ROUND,
	OE_FLUSH__HALF,
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;

	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free(event);
	}
}

/* The queue is ordered by time */
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

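/*
 * Allocation strategy: reuse entries parked on oe->cache, then carve
 * entries out of the current buffer, and only malloc() a new buffer
 * (accounted against max_alloc_size) when that runs out. Slot 0 of each
 * buffer is sacrificed to link the buffer itself on the to_free list.
 */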
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer)
			return NULL;

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	}

	return new;
}

static struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp)
{
	struct ordered_event *new;

	new = alloc_event(oe);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

static void
ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_del(&event->list);
	list_add(&event->list, &oe->cache);
	oe->nr_events--;
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

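/*
 * Deliver every queued event with a timestamp up to oe->next_flush,
 * recycling each entry and advancing oe->last_flush as it goes; a limit
 * of ULLONG_MAX means a final flush with a progress bar.
 */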
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

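/*
 * OE_FLUSH__FINAL drains the whole queue, OE_FLUSH__ROUND flushes up to
 * the boundary recorded at the end of the previous round, and
 * OE_FLUSH__HALF relieves memory pressure by flushing the older half of
 * the queued time range.
 */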
static int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
				 enum oe_flush how)
{
	struct ordered_events *oe = &s->ordered_events;
	int err;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	default:
		break;
	}

	err = __ordered_events__flush(s, tool);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;
	}

	return err;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	return ordered_events__flush(session, tool, OE_FLUSH__ROUND);
}

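/*
 * Queue one event for time-ordered delivery. Events without a usable
 * timestamp are rejected with -ETIME so the caller can deliver them
 * directly; allocation pressure triggers a half flush before giving up
 * with -ENOMEM.
 */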
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_tool *tool, struct perf_sample *sample,
			     u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;
	u64 timestamp = sample->time;
	struct ordered_event *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_events.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = ordered_events__new(oe, timestamp);
	if (!new) {
		ordered_events__flush(s, tool, OE_FLUSH__HALF);
		new = ordered_events__new(oe, timestamp);
	}

	if (!new)
		return -ENOMEM;

	new->file_offset = file_offset;
	new->event = event;
	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		u64 mask = user_regs->mask;
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}

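/*
 * PERF_SAMPLE_READ delivery: each read value is mapped back to its own
 * evsel via the sample id, and the per-sample period is reconstructed as
 * the delta from the previously seen cumulative value.
 */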
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union  perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool,
					    u64 file_offset)
{
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

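/*
 * Central dispatch for one event: byteswap it if the file's endianness
 * differs from the host's, bump the stats, hand synthesized user events
 * off for immediate handling, and either queue the rest for time-ordered
 * delivery or deliver them straight away.
 */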
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session_queue_event(session, event, tool, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session,
					       struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
	free(buf);
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

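/*
 * mmap()-based read loop: map a window of the file, process events until
 * one would cross the window's end, then remap page-aligned at the point
 * we stalled. With the 64-bit MMAP_SIZE the whole file fits one mapping;
 * needs_swap forces a private writable mapping so events can be swapped
 * in place.
 */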
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, tool, file_pos))
									< 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session,
				 struct perf_tool *tool)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size,
						     size, tool);
	else
		err = __perf_session__process_pipe_events(session, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

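/*
 * Record a reference symbol and its expected address on each map type's
 * kmap, so symbol loading can later compute how far the running kernel
 * was relocated; anything from the first ']' on is trimmed from the
 * copied name.
 */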
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	evlist__for_each(session->evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (machine__resolve_callchain(al->machine, evsel, al->thread,
					       sample, NULL, NULL,
					       PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}

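/*
 * Turn a user-supplied cpu list into a bitmap, refusing files that were
 * recorded without PERF_SAMPLE_CPU since their events cannot be
 * attributed to a CPU.
 */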
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__delete(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * If we're adding a handler for an event that is not in
		 * the session, just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}