#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread-stack.h"

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_tool *tool,
				       u64 file_offset);

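/*
 * Open-time sanity checks: read in the file header and, for on-disk data,
 * verify that all evsels agree on sample_type, sample_id_all and
 * read_format, since per-event parsing relies on those being consistent
 * across the evlist.
 */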
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

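/*
 * The kernel only flags exec()-generated comm events with
 * PERF_RECORD_MISC_COMM_EXEC when an evsel was opened with attr.comm_exec
 * set, so propagate that capability to the machines to let comm processing
 * tell execs apart from plain comm changes.
 */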
static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_sample sample;
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);
	int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	return perf_session__deliver_event(session, event->event, &sample,
					   session->tool, event->file_offset);
}

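/*
 * Session constructor: wires up the ordered-events queue, opens and
 * validates the data file when reading, and creates the kernel maps up
 * front when writing (or when there is no file at all).
 */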
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

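/*
 * Default stubs, installed by perf_tool__fill_defaults() for any callback
 * the tool did not provide, so the dispatch code below can invoke every
 * handler unconditionally.
 */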
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *perf_session
				 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	if (perf_data_file__is_pipe(session->file))
		skipn(perf_data_file__fd(session->file), event->auxtrace.size);
	return event->auxtrace.size;
}

static
int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
				      union perf_event *event __maybe_unused,
				      struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}


static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused,
				   struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
			     union perf_event *event __maybe_unused,
			     struct perf_session *perf_session
			     __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_build_id_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_id_index_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_auxtrace_info_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_auxtrace_error_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
}

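/*
 * Byte swapping for perf.data files recorded on a machine of the opposite
 * endianness. swap_sample_id_all() handles the trailing sample_id_all
 * block that follows the event-specific payload.
 */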
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size   = bswap_64(event->aux.aux_size);
	event->aux.flags      = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid	 = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid	 = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

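/*
 * Classic O(log n) bit reversal: swap the nibbles, then the bit pairs,
 * then adjacent bits, e.g. 0x01 -> 0x80 and 0xb1 (1011 0001) -> 0x8d
 * (1000 1101).
 */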
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. The 'Internet' also says this might be
 * implementation specific, and we probably need a proper fix: carry the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size      = bswap_64(event->auxtrace.size);
	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id     = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu    = bswap_32(event->stat.cpu);
	event->stat.val    = bswap_64(event->stat.val);
	event->stat.ena    = bswap_64(event->stat.ena);
	event->stat.run    = bswap_64(event->stat.run);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      struct perf_sample *sample, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the number of kernel call chain entries, and the
		 * extra 1 accounts for PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are register pairs: the caller is stored in the
		 * "from" register, while the callee is stored in the
		 * "to" register.
		 * For example, given the call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record it as
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all the "from"
		 * registers are needed to reconstruct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

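/*
 * With PERF_SAMPLE_READ, the period is not carried in the sample itself:
 * it is reconstructed as the delta between the current counter value and
 * the last value seen for this sample id.
 */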
static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
 perf_evlist__deliver_sample(struct perf_evlist *evlist,
			     struct perf_tool *tool,
			     union  perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

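/*
 * Central dispatch for kernel-generated events: pick the machine the event
 * belongs to (host or guest), then hand the event to the tool callback
 * matching its type, updating the evlist statistics along the way.
 */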
static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux &&
		    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
			evlist->stats.total_aux_lost += 1;
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	int ret;

	ret = auxtrace__process_event(session, event, sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session->evlist, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(tool, event, session);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(tool, event, session);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(tool, event, session);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(tool, event, session);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(tool, event, session);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(tool, event, session);
	case PERF_RECORD_STAT:
		return tool->stat(tool, event, session);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

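/*
 * Read an event at an arbitrary file offset without disturbing the main
 * processing loop: either point straight into the single mmap covering the
 * whole file, or lseek()+read() into the caller's buffer.
 */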
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data_file__is_pipe(session->file))
		return -1;

	fd = perf_data_file__fd(session->file);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the payload past the header so the header is not clobbered */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session__queue_event(session, event, &sample, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, &sample, tool,
					   file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;
	const struct ordered_events *oe = &session->ordered_events;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

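/*
 * mmap()-based processing of an on-disk perf.data file: map a window of
 * the file, deliver events until the next one would straddle the window
 * end, then slide the window (page aligned) and continue.
 */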
static int __perf_session__process_events(struct perf_session *session,
					  u64 data_offset, u64 data_size,
					  u64 file_size)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size == 0)
		goto out;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1731
		   file_offset);
1732 1733 1734 1735 1736
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
1737 1738
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1739
	file_pos = file_offset + head;
1740 1741 1742 1743
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}
1744 1745

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

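/*
 * Main entry point for event processing: registers the idle thread and
 * dispatches to the mmap-based reader for regular files or the
 * read()-based reader for pipes.
 */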
int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

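/*
 * Record a kallsyms reference symbol (e.g. "_text") and its address in
 * the kmaps of every map type, so that kernel relocation can later be
 * detected and compensated for when resolving symbols.
 */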
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		if (!kmap)
			continue;
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
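
/*
 * A minimal usage sketch (hypothetical address), as done for the host
 * kernel maps once the reference symbol's load address is known:
 *
 *	maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
 *					 "_text", 0xffffffff81000000ULL);
 */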

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

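/*
 * Print a sample's IP and, when symbol_conf.use_callchain is set and the
 * sample carries one, its resolved callchain.  Output fields (IP, symbol,
 * symbol offset, DSO, srcline, one-line mode) are selected via the
 * PRINT_IP_OPT_* bits in print_opts; stack_depth bounds the number of
 * callchain entries printed.
 */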
void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (thread__resolve_callchain(al->thread, evsel,
					      sample, NULL, NULL,
					      stack_depth) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}

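/*
 * Build a CPU bitmap from a user-supplied list such as "0-3,6", after
 * verifying that the session's events actually sampled the CPU field.
 * Assumes cpu_bitmap has room for MAX_NR_CPUS bits.
 */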
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


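/*
 * A minimal sketch of attaching tracepoint handlers, assuming the
 * perf_session__set_tracepoints_handlers() wrapper macro from session.h
 * and hypothetical handler functions:
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *		{ "sched:sched_wakeup", process_sched_wakeup },
 *	};
 *
 *	err = perf_session__set_tracepoints_handlers(session, handlers);
 */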
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

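/*
 * Handle a PERF_RECORD_ID_INDEX event: validate the entry count against
 * the event size, then copy each (id, idx, cpu, tid) entry into the
 * matching perf_sample_id so later samples can be attributed.
 */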
int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout,	" ... id: %"PRIu64, e->id);
			fprintf(stdout,	"  idx: %"PRIu64, e->idx);
			fprintf(stdout,	"  cpu: %"PRId64, e->cpu);
			fprintf(stdout,	"  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

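/*
 * Synthesize PERF_RECORD_ID_INDEX events covering every sample id in the
 * evlist.  Because header.size is a u16, entries are emitted in chunks
 * of at most max_nr per event.
 */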
int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}