#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

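/*
 * Open the session's input: stdin when the filename is "-" (pipe mode),
 * otherwise the perf.data file itself.  Checks ownership and size, reads
 * the header and verifies that all evsels agree on sample_type and
 * sample_id_all before declaring the file usable.
 */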
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

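/*
 * Cache the evlist-wide sample layout (type, size, sample_id_all, id
 * header size) in the session so per-event parsing doesn't have to
 * rederive it.
 */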
void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
	self->host_machine.id_hdr_size = self->id_hdr_size;
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

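/*
 * Allocate and set up a session.  For O_RDONLY the input is opened and
 * validated right away; for O_WRONLY the kernel maps are created up
 * front instead.  Typical read-side usage:
 *
 *	struct perf_session *s = perf_session__new("perf.data", O_RDONLY,
 *						   false, false, &tool);
 *	if (s != NULL) {
 *		perf_session__process_events(s, &tool);
 *		perf_session__delete(s);
 *	}
 *
 * Returns NULL on failure.
 */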
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

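/*
 * Reap the threads that machine__remove_thread() parked on the
 * dead_threads list; only safe once nothing references them anymore.
 */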
static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move it to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

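/*
 * Resolve a branch address to a map/symbol, trying each cpumode in turn
 * because branch entries carry no reliable user/kernel/guest hint.
 */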
static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try them consecutively until we find a
		 * match, or else the symbol is unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
				ip, &al, NULL);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

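/*
 * Resolve both ends of every entry in a branch stack.  The returned
 * array is allocated with calloc() and owned by the caller; NULL means
 * the allocation failed.
 */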
struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

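/*
 * Walk a sampled callchain, tracking the cpumode via the PERF_CONTEXT_*
 * markers embedded in the ip stream, and append each resolved frame to
 * the evsel's callchain cursor.
 */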
int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&evsel->hists.callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&evsel->hists.callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

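/*
 * Fill every callback the tool left NULL with an "unhandled!" stub so
 * the dispatch code can invoke them unconditionally.
 */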
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

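/* Byte-swap a buffer as a sequence of u64s; byte_size must be a multiple of 8. */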
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
}

static void perf_event__hdr_attr_swap(union perf_event *event)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

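/*
 * One queued event awaiting timestamp-ordered delivery.  Entries live on
 * ordered_samples.samples and are recycled through sample_cache.
 */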
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

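/*
 * Deliver, oldest first, every queued event with a timestamp up to
 * os->next_flush, then recycle the queue entries.
 */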
static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

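/*
 * Queue an event for ordered delivery, reusing a cached sample_queue
 * entry when possible and carving new ones out of MAX_SAMPLE_BUFFER-sized
 * slabs otherwise.  Events without a usable timestamp get -ETIME.
 */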
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
		return perf_session__find_machine(session, event->ip.pid);

	return perf_session__find_host_machine(session);
}

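/*
 * Hand a parsed event to the tool callback matching its type, after
 * accounting it and selecting the (host or guest) machine it belongs to.
 */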
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return -1;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

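/*
 * Dispatch the synthesized user-space record types, which bypass the
 * ordered sample queue and are handled as soon as they are read.
 */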
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__update_sample_type(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

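/*
 * Per-event entry point: byte-swap if the file endianness differs,
 * account the event, parse its sample data, then either queue it for
 * ordered delivery or deliver it immediately.
 */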
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

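/*
 * Pipe input can't be mmapped, so read one event at a time into a local
 * buffer and process it in place.
 */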
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure there is enough space remaining to read the event
	 * header, which contains the event's size.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

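/*
 * mmap the data section in mmap_window-sized slices and process events
 * straight out of the mapping, remapping whenever an event would cross
 * the end of the current slice.
 */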
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

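/*
 * Record a reference symbol (e.g. "_text") and its address in each map
 * type's kmap so the kernel maps can later be relocated against it.
 */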
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong.  Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

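/*
 * Print the resolved ip of a sample, or the whole resolved callchain
 * when callchains are in use, optionally with symbol and dso names.
 */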
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, struct perf_evsel *evsel,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
						sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(al.map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

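/*
 * Validate a user-supplied cpu list against the session's events and
 * set the corresponding bits in cpu_bitmap.
 */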
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}