#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

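/*
 * Open the session input. "-" selects a live pipe on stdin; anything else
 * is treated as an on-disk perf.data file whose ownership, size and header
 * are sanity-checked before use.
 */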
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0) {
			pr_err("incompatible file format");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

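/*
 * When sample_id_all is set, non-sample events carry a trailer whose layout
 * mirrors the selected sample_type bits. As a worked example, a sample_type
 * of PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU gives
 * 2 * sizeof(u32) + sizeof(u64) + 2 * sizeof(u32) = 24 trailer bytes.
 */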
static void perf_session__id_header_size(struct perf_session *session)
{
	struct perf_sample *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

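/*
 * Typical lifecycle from a tool's point of view, as a rough sketch (error
 * handling elided; "my_ops" stands for whatever perf_event_ops the caller
 * fills in):
 *
 *	struct perf_session *s;
 *
 *	s = perf_session__new("perf.data", O_RDONLY, false, false, &my_ops);
 *	if (s != NULL) {
 *		perf_session__process_events(s, &my_ops);
 *		perf_session__delete(s);
 *	}
 */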
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

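/*
 * Teardown helpers: live threads sit in the threads rb-tree, while
 * dead_threads is a plain list that only ever gains entries through
 * perf_session__remove_thread() below.
 */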
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

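/* Match a resolved symbol name against the global parent_regex from sort.c (the --parent option). */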
static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

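/*
 * Walk a raw ip_callchain and append resolved entries to the session's
 * callchain cursor. Entries >= PERF_CONTEXT_MAX are not return addresses
 * but context markers (kernel/user/hypervisor) that switch the cpumode
 * used to resolve the addresses following them.
 */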
int perf_session__resolve_callchain(struct perf_session *self,
				    struct thread *thread,
				    struct ip_callchain *chain,
				    struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&self->callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&self->callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(union perf_event *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_sample_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = perf_event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

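/*
 * Byte-swap a buffer one 64-bit word at a time. Note the implicit
 * assumption that byte_size is a multiple of sizeof(u64); a trailing
 * partial word would be swapped past the end of the buffer.
 */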
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
	size_t size;

	event->attr.attr.type		= bswap_32(event->attr.attr.type);
	event->attr.attr.size		= bswap_32(event->attr.attr.size);
	event->attr.attr.config		= bswap_64(event->attr.attr.config);
	event->attr.attr.sample_period	= bswap_64(event->attr.attr.sample_period);
	event->attr.attr.sample_type	= bswap_64(event->attr.attr.sample_type);
	event->attr.attr.read_format	= bswap_64(event->attr.attr.read_format);
	event->attr.attr.wakeup_events	= bswap_32(event->attr.attr.wakeup_events);
	event->attr.attr.bp_type	= bswap_32(event->attr.attr.bp_type);
	event->attr.attr.bp_addr	= bswap_64(event->attr.attr.bp_addr);
	event->attr.attr.bp_len		= bswap_64(event->attr.attr.bp_len);

	/* The rest of the record is the variable-length array of ids. */
	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	int ret;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, ops,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

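/*
 * Insertion starts from the previously queued entry rather than a list
 * end, so a nearly-sorted stream is queued in amortized constant time;
 * only badly out-of-order timestamps pay for a longer walk.
 */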
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

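/*
 * Queue entries are carved out of 64KB slabs: slot 0 of each slab doubles
 * as the list node that chains the slab into to_free (which is why
 * allocation below starts at slot 1 with sample_buffer_idx = 2), and
 * flushed entries are recycled through sample_cache before a new slab is
 * allocated.
 */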
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

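/*
 * Final dispatch point: every event, whether delivered immediately or
 * replayed from the ordered-samples queue, ends up here and is routed to
 * the ops callback matching its header type.
 */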
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	struct perf_evsel *evsel;

	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		evsel = perf_evlist__id2evsel(session->evlist, sample->id);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		return ops->sample(event, sample, evsel, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	/* Validate the type before it is used to index perf_event__swap_ops. */
	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == perf_event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
			    "!\n\nCheck IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

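/*
 * Pipe input can be neither mmapped nor seeked, so events are consumed
 * record by record: read the fixed-size header first, then the rest of
 * the record once header.size is known.
 */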
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

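/*
 * On-disk input is walked through a sliding window of mmap slices of
 * mmap_window bytes each; when fetch_mmaped_event() runs off the end of
 * the current slice, the file is remapped further on at a page-aligned
 * offset and the walk continues.
 */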
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

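/*
 * Record a reference symbol (typically "_text") and its address so kernel
 * maps can later be relocated when the recorded kernel and the one used
 * for symbol resolution were loaded at different addresses.
 */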
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_session__print_ip(union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_session *session,
			    int print_sym, int print_dso)
{
	struct addr_location al;
	const char *symname, *dsoname;
	struct callchain_cursor *cursor = &session->callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (perf_session__resolve_callchain(session, al.thread,
						sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				if (node->sym && node->sym->name)
					symname = node->sym->name;
				else
					symname = "";

				printf(" %s", symname);
			}
			if (print_dso) {
				if (node->map && node->map->dso && node->map->dso->name)
					dsoname = node->map->dso->name;
				else
					dsoname = "";

				printf(" (%s)", dsoname);
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, al.addr);
		if (print_sym) {
			if (al.sym && al.sym->name)
				symname = al.sym->name;
			else
				symname = "";

			printf(" %s", symname);
		}

		if (print_dso) {
			if (al.map && al.map->dso && al.map->dso->name)
				dsoname = al.map->dso->name;
			else
				dsoname = "";

			printf(" (%s)", dsoname);
		}
	}
}

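/*
 * Build a bitmap from a user-supplied cpu list such as "0-3,6", after
 * checking that every event type in the session recorded PERF_SAMPLE_CPU;
 * without that, per-cpu filtering would be meaningless.
 */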
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}