#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "vdso.h"

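/*
 * Open the perf.data file named in self->filename, or read from stdin when
 * the name is "-".  For regular files this also checks ownership (unless
 * force is set), rejects empty files, reads the header and verifies that
 * the events carry a consistent sample_type/sample_id_all configuration.
 */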
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

83
void perf_session__set_id_hdr_size(struct perf_session *session)
84
{
85 86 87
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
88 89
}

90 91
int perf_session__create_kernel_maps(struct perf_session *self)
{
92
	int ret = machine__create_kernel_maps(&self->machines.host);
93 94

	if (ret >= 0)
95
		ret = machines__create_guest_kernel_maps(&self->machines);
96 97 98
	return ret;
}

99 100
static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
101
	machines__destroy_kernel_maps(&self->machines);
102 103
}

104 105
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
106
				       struct perf_tool *tool)
107
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);
121 122 123 124 125

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
127
	INIT_LIST_HEAD(&self->ordered_samples.samples);
128
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
129
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
130
	machines__init(&self->machines);
131

132 133 134
	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
135
		perf_session__set_id_hdr_size(self);
136 137 138
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
139
		 * kernel MMAP event, in perf_event__process_mmap().
140 141 142 143
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}
144

145
	if (tool && tool->ordering_requires_timestamps &&
146
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
147
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
148
		tool->ordered_samples = false;
149 150
	}

151 152
out:
	return self;
153 154 155
out_delete:
	perf_session__delete(self);
	return NULL;
156 157
}

158 159
static void perf_session__delete_dead_threads(struct perf_session *session)
{
160
	machine__delete_dead_threads(&session->machines.host);
161 162 163 164
}

static void perf_session__delete_threads(struct perf_session *session)
{
165
	machine__delete_threads(&session->machines.host);
166 167
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->hostname);
	free(env->os_release);
	free(env->version);
	free(env->arch);
	free(env->cpu_desc);
	free(env->cpuid);

	free(env->cmdline);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}

184 185
void perf_session__delete(struct perf_session *self)
{
186
	perf_session__destroy_kernel_maps(self);
187 188
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
189
	perf_session_env__delete(&self->header.env);
190
	machines__exit(&self->machines);
191 192
	close(self->fd);
	free(self);
193
	vdso__exit();
194
}
195

196 197 198
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
199 200 201
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
202 203 204 205 206
{
	dump_printf(": unhandled!\n");
	return 0;
}

207 208
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
209 210
					 struct perf_evlist **pevlist
					 __maybe_unused)
211 212 213 214 215
{
	dump_printf(": unhandled!\n");
	return 0;
}

216 217 218 219 220
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
221 222 223 224 225
{
	dump_printf(": unhandled!\n");
	return 0;
}

226 227 228 229
static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
230 231 232 233 234
{
	dump_printf(": unhandled!\n");
	return 0;
}

235 236 237 238
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
239 240 241 242 243
{
	dump_printf(": unhandled!\n");
	return 0;
}

244 245
static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused)
246 247 248 249 250
{
	dump_printf(": unhandled!\n");
	return 0;
}

251
static int process_finished_round(struct perf_tool *tool,
252 253
				  union perf_event *event,
				  struct perf_session *session);
254

255
static void perf_tool__fill_defaults(struct perf_tool *tool)
256
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
286
		else
287
			tool->finished_round = process_finished_round_stub;
288
	}
289
}
 
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}
300

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
322
				   bool sample_id_all __maybe_unused)
323
{
324 325
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
326 327
}

328
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
329
{
330 331
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
332 333 334 335

	if (sample_id_all) {
		void *data = &event->comm.comm;

336
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
337 338
		swap_sample_id_all(event, data);
	}
339 340
}

341 342
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
343
{
344 345 346 347 348
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
349 350 351 352

	if (sample_id_all) {
		void *data = &event->mmap.filename;

353
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
354 355
		swap_sample_id_all(event, data);
	}
356 357
}

358
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
359
{
360 361 362 363 364
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
365 366 367

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
368 369
}

370
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
371
{
372 373 374 375 376 377
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);
378 379 380

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
381 382
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
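/*
 * Example: a flags byte that reads 0000 0101 on the writing host comes out
 * as 1010 0000 after the per-byte reversal, i.e. revbyte(0x05) == 0xa0, and
 * applying the reversal a second time restores the original layout.
 */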
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
428 429

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
430 431
}

432
static void perf_event__hdr_attr_swap(union perf_event *event,
433
				      bool sample_id_all __maybe_unused)
434 435 436
{
	size_t size;

437
	perf_event__attr_swap(&event->attr.attr);
438

439 440 441
	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
442 443
}

444
static void perf_event__event_type_swap(union perf_event *event,
445
					bool sample_id_all __maybe_unused)
446
{
447 448
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
449 450
}

451
static void perf_event__tracing_data_swap(union perf_event *event,
452
					  bool sample_id_all __maybe_unused)
453
{
454
	event->tracing_data.size = bswap_32(event->tracing_data.size);
455 456
}

457 458
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);
459

460 461 462 463 464 465 466 467
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
468
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
469 470 471 472
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
473 474
};

475 476
struct sample_queue {
	u64			timestamp;
477
	u64			file_offset;
478
	union perf_event	*event;
479 480 481
	struct list_head	list;
};

482 483 484 485
static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

486
	while (!list_empty(&os->to_free)) {
487 488
		struct sample_queue *sq;

489
		sq = list_entry(os->to_free.next, struct sample_queue, list);
490 491 492 493 494
		list_del(&sq->list);
		free(sq);
	}
}

495
static int perf_session_deliver_event(struct perf_session *session,
496
				      union perf_event *event,
497
				      struct perf_sample *sample,
498
				      struct perf_tool *tool,
499
				      u64 file_offset);
500

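/*
 * Flush all queued samples with a timestamp not above os->next_flush:
 * parse each one, hand it to perf_session_deliver_event() in timestamp
 * order and recycle the sample_queue entry onto the sample_cache list.
 */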
static int flush_sample_queue(struct perf_session *s,
502
			       struct perf_tool *tool)
503
{
504 505
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
506
	struct sample_queue *tmp, *iter;
507
	struct perf_sample sample;
508 509
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
510
	unsigned idx = 0, progress_next = os->nr_samples / 16;
511
	int ret;
512

513
	if (!tool->ordered_samples || !limit)
514
		return 0;
515 516 517

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
518
			break;
519

520
		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
521 522
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
523 524 525 526 527 528
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}
529

530
		os->last_flush = iter->timestamp;
531
		list_del(&iter->list);
532
		list_add(&iter->list, &os->sample_cache);
533 534 535 536 537
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
538
	}
539 540 541 542 543 544 545

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
546 547

	os->nr_samples = 0;
548 549

	return 0;
550 551
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
592
				  union perf_event *event __maybe_unused,
593
				  struct perf_session *session)
594
{
595 596 597
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
598

599
	return ret;
600 601
}

602
/* The queue is ordered by time */
603
static void __queue_event(struct sample_queue *new, struct perf_session *s)
604
{
605 606 607 608
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;
609

610
	++os->nr_samples;
611
	os->last_sample = new;
612

613 614 615
	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
616 617 618 619
		return;
	}

	/*
620 621 622
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
623
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
646 647
}

648 649
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

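/*
 * Queue an event for time-ordered processing.  sample_queue entries are
 * reused from the sample_cache free list when possible, otherwise they come
 * from 64KB slabs kept on the to_free list and released later by
 * perf_session_free_sample_buffers().  Events without a valid timestamp are
 * rejected with -ETIME.
 */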
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
651
				    struct perf_sample *sample, u64 file_offset)
652
{
653 654
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
655
	u64 timestamp = sample->time;
656 657
	struct sample_queue *new;

658
	if (!timestamp || timestamp == ~0ULL)
659 660
		return -ETIME;

661 662 663 664 665
	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

666 667 668
	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
669 670 671 672
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
673
	} else {
674 675
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
676
			return -ENOMEM;
677 678 679
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
680
	}
681 682

	new->timestamp = timestamp;
683
	new->file_offset = file_offset;
684
	new->event = event;
685

686
	__queue_event(new, s);
687 688 689

	return 0;
}
690

691
static void callchain__printf(struct perf_sample *sample)
692 693
{
	unsigned int i;
694

695
	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
696 697

	for (i = 0; i < sample->callchain->nr; i++)
698 699
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
700 701
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

742
static void perf_session__print_tstamp(struct perf_session *session,
743
				       union perf_event *event,
744
				       struct perf_sample *sample)
745
{
746 747
	u64 sample_type = perf_evlist__sample_type(session->evlist);

748
	if (event->header.type != PERF_RECORD_SAMPLE &&
749
	    !perf_evlist__sample_id_all(session->evlist)) {
750 751 752 753
		fputs("-1 -1 ", stdout);
		return;
	}

754
	if ((sample_type & PERF_SAMPLE_CPU))
755 756
		printf("%u ", sample->cpu);

757
	if (sample_type & PERF_SAMPLE_TIME)
758
		printf("%" PRIu64 " ", sample->time);
759 760
}

761
static void dump_event(struct perf_session *session, union perf_event *event,
762
		       u64 file_offset, struct perf_sample *sample)
763 764 765 766
{
	if (!dump_trace)
		return;

767 768
	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);
769 770 771 772 773 774

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

775
	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
776
	       event->header.size, perf_event__name(event->header.type));
777 778
}

779
static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
780
			struct perf_sample *sample)
781
{
782 783
	u64 sample_type;

784 785 786
	if (!dump_trace)
		return;

787
	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
788
	       event->header.misc, sample->pid, sample->tid, sample->ip,
789
	       sample->period, sample->addr);
790

791
	sample_type = evsel->attr.sample_type;
792 793

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
794
		callchain__printf(sample);
795

796
	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
797
		branch_stack__printf(sample);
798 799 800 801 802 803

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);
804 805 806

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);
807 808 809

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
810 811
}

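/*
 * Guest samples (PERF_RECORD_MISC_GUEST_KERNEL/USER) are attributed to the
 * machine keyed by the guest pid; everything else belongs to the host.
 */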
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

818 819 820
	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
821 822 823 824 825 826 827
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

828
		return perf_session__findnew_machine(session, pid);
829
	}
830

831
	return &session->machines.host;
832 833
}

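/*
 * Deliver one already-parsed event to the tool callbacks, routing it by
 * record type and updating per-evsel and per-session statistics.
 */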
static int perf_session_deliver_event(struct perf_session *session,
835
				      union perf_event *event,
836
				      struct perf_sample *sample,
837
				      struct perf_tool *tool,
838
				      u64 file_offset)
839
{
840
	struct perf_evsel *evsel;
841
	struct machine *machine;
842

843 844
	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

863 864
	machine = perf_session__find_machine_for_cpumode(session, event);

865 866
	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
867
		dump_sample(evsel, event, sample);
868
		if (evsel == NULL) {
869
			++session->stats.nr_unknown_id;
870
			return 0;
871
		}
872
		if (machine == NULL) {
873
			++session->stats.nr_unprocessable_samples;
874
			return 0;
875
		}
876
		return tool->sample(tool, event, sample, evsel, machine);
877
	case PERF_RECORD_MMAP:
878
		return tool->mmap(tool, event, sample, machine);
879
	case PERF_RECORD_COMM:
880
		return tool->comm(tool, event, sample, machine);
881
	case PERF_RECORD_FORK:
882
		return tool->fork(tool, event, sample, machine);
883
	case PERF_RECORD_EXIT:
884
		return tool->exit(tool, event, sample, machine);
885
	case PERF_RECORD_LOST:
886
		if (tool->lost == perf_event__process_lost)
887
			session->stats.total_lost += event->lost.lost;
888
		return tool->lost(tool, event, sample, machine);
889
	case PERF_RECORD_READ:
890
		return tool->read(tool, event, sample, evsel, machine);
891
	case PERF_RECORD_THROTTLE:
892
		return tool->throttle(tool, event, sample, machine);
893
	case PERF_RECORD_UNTHROTTLE:
894
		return tool->unthrottle(tool, event, sample, machine);
895
	default:
896
		++session->stats.nr_unknown_events;
897 898 899 900
		return -1;
	}
}

901
static int perf_session__preprocess_sample(struct perf_session *session,
902
					   union perf_event *event, struct perf_sample *sample)
903 904
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
905
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
906 907 908 909
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
910 911
		++session->stats.nr_invalid_chains;
		session->stats.total_invalid_chains += sample->period;
912 913 914 915 916
		return -EINVAL;
	}
	return 0;
}

917
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
918
					    struct perf_tool *tool, u64 file_offset)
919
{
920 921
	int err;

922
	dump_event(session, event, file_offset, NULL);
923

924
	/* These events are processed right away */
925
	switch (event->header.type) {
926
	case PERF_RECORD_HEADER_ATTR:
927
		err = tool->attr(tool, event, &session->evlist);
928
		if (err == 0)
929
			perf_session__set_id_hdr_size(session);
930
		return err;
931
	case PERF_RECORD_HEADER_EVENT_TYPE:
932
		return tool->event_type(tool, event);
933 934
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
935
		lseek(session->fd, file_offset, SEEK_SET);
936
		return tool->tracing_data(tool, event, session);
937
	case PERF_RECORD_HEADER_BUILD_ID:
938
		return tool->build_id(tool, event, session);
939
	case PERF_RECORD_FINISHED_ROUND:
940
		return tool->finished_round(tool, event, session);
941
	default:
942
		return -EINVAL;
943
	}
944 945
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

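/*
 * Top level per-event entry point: byte-swap the record if the file was
 * written with the opposite endianness, dispatch user (synthesized) events
 * directly and queue or deliver kernel events depending on whether the
 * tool asked for time-ordered samples.
 */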
static int perf_session__process_event(struct perf_session *session,
956
				       union perf_event *event,
957
				       struct perf_tool *tool,
958 959
				       u64 file_offset)
{
960
	struct perf_sample sample;
961 962
	int ret;

963
	if (session->header.needs_swap)
964
		event_swap(event, perf_evlist__sample_id_all(session->evlist));
965 966 967 968

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

969
	events_stats__inc(&session->stats, event->header.type);
970 971

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
972
		return perf_session__process_user_event(session, event, tool, file_offset);
973

974 975 976
	/*
	 * For all kernel events we get the sample data
	 */
977
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
978 979
	if (ret)
		return ret;
980 981 982 983 984

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

985
	if (tool->ordered_samples) {
986 987
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
988 989 990 991
		if (ret != -ETIME)
			return ret;
	}

992
	return perf_session_deliver_event(session, event, &sample, tool,
993
					  file_offset);
994 995
}

996 997 998 999 1000 1001 1002
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

1003 1004
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
1005
	return machine__findnew_thread(&session->machines.host, pid);
1006 1007
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

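/*
 * Print a summary of everything that went wrong while processing the file:
 * lost chunks, unknown or unparseable records and invalid callchains.
 */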
static void perf_session__warn_about_errors(const struct perf_session *session,
1021
					    const struct perf_tool *tool)
1022
{
1023
	if (tool->lost == perf_event__process_lost &&
1024
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
1025 1026
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
1027 1028
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
1029 1030
	}

1031
	if (session->stats.nr_unknown_events != 0) {
1032 1033 1034 1035 1036
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
1037
			    session->stats.nr_unknown_events);
1038 1039
	}

1040
	if (session->stats.nr_unknown_id != 0) {
1041
		ui__warning("%u samples with id not present in the header\n",
1042
			    session->stats.nr_unknown_id);
1043 1044
	}

1045
	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}
1052

1053
	if (session->stats.nr_unprocessable_samples != 0) {
1054 1055
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
1056
			    session->stats.nr_unprocessable_samples);
1057
	}
1058 1059
}

1060 1061 1062 1063
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

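/*
 * Pipe mode: the event stream cannot be mmap()ed or seeked, so read each
 * record header, grow the buffer on demand and process events one by one
 * until EOF or until session_done() is set.
 */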
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
1065
{
1066 1067 1068
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
1069 1070 1071 1072 1073
	int skip = 0;
	u64 head;
	int err;
	void *p;

1074
	perf_tool__fill_defaults(tool);
1075 1076

	head = 0;
1077 1078 1079 1080 1081
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
1082
more:
1083 1084
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
1085 1086 1087 1088 1089 1090 1091 1092 1093
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
1094
		perf_event_header__bswap(&event->header);
1095

1096
	size = event->header.size;
1097 1098 1099
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
1111 1112
	p += sizeof(struct perf_event_header);

1113
	if (size - sizeof(struct perf_event_header)) {
1114
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
1115 1116 1117 1118 1119
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}
1120

1121 1122 1123
			pr_err("failed to read event data\n");
			goto out_err;
		}
1124 1125
	}

1126
	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
1127
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1128
		       head, event->header.size, event->header.type);
1129 1130
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
1143
	free(buf);
1144
	perf_session__warn_about_errors(self, tool);
1145
	perf_session_free_sample_buffers(self);
1146 1147 1148
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

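/*
 * mmap() the data section of the file in MMAP_SIZE windows and walk the
 * events in place, remapping whenever the next record would cross the end
 * of the current window.
 */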
int __perf_session__process_events(struct perf_session *session,
1186
				   u64 data_offset, u64 data_size,
1187
				   u64 file_size, struct perf_tool *tool)
1188
{
1189
	u64 head, page_offset, file_offset, file_pos, progress_next;
1190
	int err, mmap_prot, mmap_flags, map_idx = 0;
1191
	size_t	mmap_size;
1192
	char *buf, *mmaps[NUM_MMAPS];
1193
	union perf_event *event;
1194
	uint32_t size;
1195

1196
	perf_tool__fill_defaults(tool);
1197

1198 1199 1200
	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;
1201

1202 1203 1204
	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

1205 1206
	progress_next = file_size / 16;

1207
	mmap_size = MMAP_SIZE;
1208 1209 1210
	if (mmap_size > file_size)
		mmap_size = file_size;

1211 1212
	memset(mmaps, 0, sizeof(mmaps));

1213 1214 1215
	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

1216
	if (session->header.needs_swap) {
1217 1218 1219
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
1220
remap:
1221 1222
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
1223 1224 1225 1226 1227
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
1228 1229
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1230
	file_pos = file_offset + head;
1231 1232

more:
1233 1234
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
1235 1236 1237 1238
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}
1239

1240 1241 1242
		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
1243 1244 1245 1246 1247
		goto remap;
	}

	size = event->header.size;

1248
	if (size == 0 ||
1249
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
1250 1251 1252 1253 1254
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
1255 1256 1257
	}

	head += size;
1258
	file_pos += size;
1259

1260 1261
	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
1262 1263
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
1264 1265
	}

1266
	if (file_pos < file_size)
1267
		goto more;
1268

1269
	err = 0;
1270
	/* do the final flush for ordered samples */
1271
	session->ordered_samples.next_flush = ULLONG_MAX;
1272
	err = flush_sample_queue(session, tool);
1273
out_err:
	ui_progress__finish();
1275
	perf_session__warn_about_errors(session, tool);
1276
	perf_session_free_sample_buffers(session);
1277 1278
	return err;
}
1279

1280
int perf_session__process_events(struct perf_session *self,
1281
				 struct perf_tool *tool)
1282 1283 1284 1285 1286 1287
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

1288 1289 1290 1291
	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
1292
						     self->size, tool);
1293
	else
1294
		err = __perf_session__process_pipe_events(self, tool);
1295

1296 1297 1298
	return err;
}

1299
bool perf_session__has_traces(struct perf_session *session, const char *msg)
1300
{
1301
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
1302 1303
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
1304 1305
	}

1306
	return true;
1307
}

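/*
 * Remember the kallsyms reference symbol (e.g. "_text") and its address so
 * that the kernel maps can later be relocated against it; the same
 * ref_reloc_sym is shared by every map type.
 */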
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
1311 1312
{
	char *bracket;
1313
	enum map_type i;
1314 1315 1316 1317 1318
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;
1319

1320 1321 1322
	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
1323
		return -ENOMEM;
1324
	}
1325

1326
	bracket = strchr(ref->name, ']');
1327 1328 1329
	if (bracket)
		*bracket = '\0';

1330
	ref->addr = addr;
1331 1332

	for (i = 0; i < MAP__NR_TYPES; ++i) {
1333 1334
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
1335 1336
	}

1337 1338
	return 0;
}
1339 1340 1341

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
1342
	return machines__fprintf_dsos(&self->machines, fp);
1343
}
1344 1345

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
1346
					  bool (skip)(struct dso *dso, int parm), int parm)
1347
{
1348
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
1349
}
1350 1351 1352 1353 1354 1355

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

1356
	ret += events_stats__fprintf(&session->stats, fp);
1357 1358

	list_for_each_entry(pos, &session->evlist->entries, node) {
1359
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1360
		ret += events_stats__fprintf(&pos->hists.stats, fp);
1361 1362 1363 1364
	}

	return ret;
}
1365

1366 1367 1368 1369 1370 1371
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
1372
	return machine__fprintf(&session->machines.host, fp);
1373 1374
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

1387 1388 1389
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  int print_sym, int print_dso, int print_symoffset)
1390 1391 1392 1393
{
	struct addr_location al;
	struct callchain_cursor_node *node;

1394
	if (perf_event__preprocess_sample(event, machine, &al, sample,
1395 1396 1397 1398 1399 1400 1401 1402
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

1403 1404 1405

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL) != 0) {
1406 1407 1408 1409
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
1410
		callchain_cursor_commit(&callchain_cursor);
1411 1412

		while (1) {
1413
			node = callchain_cursor_current(&callchain_cursor);
1414 1415 1416
			if (!node)
				break;

1417 1418
			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
1419 1420
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
1421 1422
			}
			if (print_dso) {
1423
				printf(" (");
1424
				map__fprintf_dsoname(node->map, stdout);
1425
				printf(")");
1426 1427
			}
			printf("\n");
1428

1429
			callchain_cursor_advance(&callchain_cursor);
1430 1431 1432
		}

	} else {
1433
		printf("%16" PRIx64, sample->ip);
1434
		if (print_sym) {
1435
			printf(" ");
1436 1437 1438 1439 1440
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
1441 1442 1443
		}

		if (print_dso) {
1444 1445 1446
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
1447
		}
1448 1449
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
1472 1473 1474 1475
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


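/*
 * Associate tracepoint names of the form "subsys:event" with handler
 * functions for the matching evsels in the session; names that are not
 * present in the session are silently ignored.
 */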
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}