#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

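/*
 * Open the file backing this session: a filename of "-" means events are
 * read from stdin (pipe mode); anything else is treated as a perf.data
 * style file that must pass ownership, size and header checks.
 */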
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

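/*
 * With sample_id_all, non-sample events carry a trailing block of sample
 * ID fields.  Compute its size from the bits set in sample_type so those
 * events can be parsed correctly.
 */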
static void perf_session__id_header_size(struct perf_session *session)
{
	struct perf_sample *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}

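/* Cache the evlist-wide sample layout on the session itself. */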
void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

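/*
 * Typical lifecycle for a tool reading an existing file (a sketch, error
 * handling mostly elided):
 *
 *	struct perf_session *session;
 *	int err;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &ops);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &ops);
 *	perf_session__delete(session);
 */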
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

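/*
 * Resolve each ip in a raw callchain to a map/symbol and append it to the
 * session's callchain_cursor.  PERF_CONTEXT_* entries are not frames: they
 * switch the cpumode used to resolve the ips that follow them.
 */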
int perf_session__resolve_callchain(struct perf_session *self,
				    struct thread *thread,
				    struct ip_callchain *chain,
				    struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&self->callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&self->callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(union perf_event *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

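/*
 * Point every handler the tool did not provide at a stub, so that event
 * dispatch never has to check for NULL function pointers.
 */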
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_sample_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = perf_event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

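/* Byte-swap a buffer of u64s; assumes byte_size is a multiple of 8. */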
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
}

static void perf_event__hdr_attr_swap(union perf_event *event)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

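/*
 * One queued event awaiting ordered delivery.  Entries are carved out of
 * chunks tracked on ordered_samples.to_free and recycled through the
 * sample_cache list once flushed.
 */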
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

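/*
 * Deliver every queued event with a timestamp at or below next_flush,
 * recycling the queue entries into the sample cache as we go.
 */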
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	int ret;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, ops,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

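/*
 * Queue one event for ordered delivery.  Entries come from the recycle
 * cache when possible, otherwise from the current malloc'ed chunk; slot 0
 * of a fresh chunk is reserved to link the chunk onto the to_free list.
 */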
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

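/*
 * Hand one (possibly reordered) event to the tool's handler for its record
 * type.  Samples are first matched back to the evsel they belong to via
 * their sample id.
 */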
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	struct perf_evsel *evsel;

	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		evsel = perf_evlist__id2evsel(session->evlist, sample->id);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		return ops->sample(event, sample, evsel, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

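/*
 * Central entry point for a single event: byte-swap if the file was
 * recorded on a host of different endianness, account the event in the
 * stats, dispatch user (synthesized) events directly, and either queue or
 * deliver kernel events depending on whether ordering was requested.
 */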
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == perf_event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
			    "!\n\nCheck IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

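/*
 * Process an on-disk file by mmaping it in mmap_window sized slices.
 * Offsets passed to mmap must be page aligned, so head tracks the position
 * within the current slice; when the next event does not fit, the slice is
 * unmapped and the window is remapped further into the file.
 */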
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_session__print_ip(union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_session *session,
			    int print_sym, int print_dso)
{
	struct addr_location al;
	const char *symname, *dsoname;
	struct callchain_cursor *cursor = &session->callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (perf_session__resolve_callchain(session, al.thread,
						sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				if (node->sym && node->sym->name)
					symname = node->sym->name;
				else
					symname = "";

				printf(" %s", symname);
			}
			if (print_dso) {
				if (node->map && node->map->dso && node->map->dso->name)
					dsoname = node->map->dso->name;
				else
					dsoname = "";

				printf(" (%s)", dsoname);
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			if (al.sym && al.sym->name)
				symname = al.sym->name;
			else
				symname = "";

			printf(" %s", symname);
		}

		if (print_dso) {
			if (al.map && al.map->dso && al.map->dso->name)
				dsoname = al.map->dso->name;
			else
				dsoname = "";

			printf(" (%s)", dsoname);
		}
	}
}

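/*
 * Turn a user supplied cpu list string into a bitmap, after verifying that
 * every event type present in the session recorded CPU information.
 */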
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}