#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "vdso.h"

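/*
 * Open the input for reading: stdin when the filename is "-", otherwise the
 * perf.data file itself, whose header and per-event sample_type,
 * sample_id_all and read_format settings are then validated.
 */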
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	if (!perf_evlist__valid_read_format(self->evlist)) {
		pr_err("non matching read_format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machines__destroy_kernel_maps(&self->machines);
}

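/*
 * Allocate and initialize a session: pick the input (file, pipe or the
 * default perf.data) and open it in O_RDONLY mode, or create the kernel
 * maps up front in O_WRONLY mode.
 */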
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machines__init(&self->machines);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->hostname);
	free(env->os_release);
	free(env->version);
	free(env->arch);
	free(env->cpu_desc);
	free(env->cpuid);

	free(env->cmdline);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machines__exit(&self->machines);
	close(self->fd);
	free(self);
	vdso__exit();
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

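/*
 * Point every callback the tool did not set to a default handler, so the
 * event dispatch code can call them unconditionally.
 */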
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
 
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

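/*
 * Deliver the queued events with timestamps up to os->next_flush to the
 * tool in timestamp order, then move the consumed entries to the
 * sample_cache list for reuse.
 */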
static int flush_sample_queue(struct perf_session *s,
		       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	bool show_progress = limit == ULLONG_MAX;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (show_progress && (++idx >= progress_next)) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

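/*
 * Queue an event for time ordered processing, taking a sample_queue entry
 * from the cache or allocating a fresh buffer when the cache is empty.
 */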
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		return perf_session__findnew_machine(session, pid);
	}

	return &session->machines.host;
}

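/*
 * For PERF_SAMPLE_READ samples: look up the evsel by the read value id,
 * convert the running counter value into a period and deliver the sample
 * to the tool.
 */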
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union  perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}

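/*
 * Hand a single kernel event to the matching tool callback after resolving
 * the evsel it belongs to and the (host or guest) machine it came from.
 */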
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

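/*
 * Synthesized user events (attr, tracing data, build ids, finished round)
 * are not time ordered and are processed right away.
 */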
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

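/*
 * Main per-event entry point: byteswap if needed, account the event,
 * route user events directly and either queue kernel events for time
 * ordered processing or deliver them immediately.
 */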
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, 0, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

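/*
 * Summarize anything suspicious seen while processing: lost chunks,
 * unknown events, samples with unknown ids, invalid callchains and
 * unprocessable samples.
 */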
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

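/*
 * Read events from a pipe, growing the single event buffer as needed,
 * until the stream ends or session_done() is set.
 */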
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

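/*
 * Process an on-disk perf.data file by mmaping it in MMAP_SIZE windows and
 * walking the events, remapping whenever an event crosses the end of the
 * current window, then do a final flush of the ordered sample queue.
 */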
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &session->evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

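/*
 * Record a reference symbol (name and address) in the kmap of every map
 * type; it is used later to detect and adjust for kernel relocation.
 */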
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

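/*
 * Print the resolved ip, symbol and dso for a sample, either as a single
 * line or, when callchains are requested, one line per callchain entry up
 * to stack_depth entries, honouring the PRINT_IP_OPT_* flags.
 */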
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct addr_location al;
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	char s = print_oneline ? ' ' : '\t';

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (stack_depth) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					al.addr = node->ip;
					al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (!print_oneline)
				printf("\n");

			callchain_cursor_advance(&callchain_cursor);

			stack_depth--;
		}

	} else {
		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

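/*
 * Turn a user supplied cpu list string into a bitmap, after checking that
 * the recorded events actually carry PERF_SAMPLE_CPU.
 */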
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

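/*
 * Associate each "subsystem:tracepoint" name in @assocs with its handler,
 * looking the event up via libtraceevent and the session's evlist.
 * Tracepoints not present in the session are silently ignored.
 */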
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}