#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"

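/*
 * Read the perf.data header and verify that all evsels agree on
 * sample_type, sample_id_all and read_format; the consistency checks are
 * skipped for pipe input.
 */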
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

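/*
 * Allocate a session, optionally open and read the data file, and set up
 * the host machine and ordered-events state used during processing.
 */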
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	INIT_LIST_HEAD(&session->ordered_events.events);
	INIT_LIST_HEAD(&session->ordered_events.cache);
	INIT_LIST_HEAD(&session->ordered_events.to_free);
	session->ordered_events.max_alloc_size = (u64) -1;
	session->ordered_events.cur_alloc_size = 0;
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_dead_threads(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

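/*
 * Point every unset tool callback at a stub so event processing never
 * dereferences a NULL handler.
 */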
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
 
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

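/* Release any ordered_event buffers still queued on the to_free list. */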
static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;

	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free(event);
	}
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	return ordered_events__flush(session, tool, OE_FLUSH__ROUND);
}

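/*
 * Queue an event for timestamp-ordered delivery.  Events without a valid
 * timestamp are rejected with -ETIME so the caller can deliver them
 * immediately; if allocation fails, half of the queue is flushed to free
 * memory before retrying.
 */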
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_tool *tool, struct perf_sample *sample,
			     u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;
	u64 timestamp = sample->time;
	struct ordered_event *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_events.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = ordered_events__new(oe, timestamp);
	if (!new) {
		ordered_events__flush(s, tool, OE_FLUSH__HALF);
		new = ordered_events__new(oe, timestamp);
	}

	if (!new)
		return -ENOMEM;

	new->file_offset = file_offset;
	new->event = event;
	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		u64 mask = user_regs->mask;
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

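/*
 * Guest samples are routed to the machine matching the sample/mmap pid;
 * everything else is accounted to the host machine.
 */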
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}

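/*
 * PERF_SAMPLE_READ samples carry counter values: translate each value's id
 * back to its evsel and deliver one sample per value (or per group member).
 */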
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union  perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}

int perf_session__deliver_event(struct perf_session *session,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

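/*
 * Synthesized user events (attr, tracing data, build ids, ...) bypass the
 * ordered-events queue and are handled as soon as they are read.
 */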
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool,
					    u64 file_offset)
{
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session_queue_event(session, event, tool, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, &sample, tool,
					   file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

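/* Checked by the processing loops below so event processing can be stopped early. */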
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session,
					       struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
	free(buf);
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

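/*
 * Return the event at 'head' in the current mmap window, byte-swapping its
 * header if needed; returns NULL when the event would cross the end of the
 * window so the caller can remap.
 */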
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

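/*
 * Walk the on-disk data area in MMAP_SIZE windows, processing one event at
 * a time and remapping whenever an event crosses the current window.
 */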
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, tool, file_pos))
									< 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session,
				 struct perf_tool *tool)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size,
						     size, tool);
	else
		err = __perf_session__process_pipe_events(session, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	evlist__for_each(session->evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (machine__resolve_callchain(al->machine, evsel, al->thread,
					       sample, NULL, NULL,
					       PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__delete(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}