// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif
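
/*
 * Editorial note: each decompressed chunk above lives in one mmap'ed region,
 * with the struct decomp bookkeeping header followed in memory by its
 * payload (decomp->data). Chunks form a singly linked list anchored at
 * session->decomp, and session->decomp_last tracks the tail so that a
 * partial event left over at the end of one chunk can be copied in front
 * of the next chunk's decompressed bytes.
 */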

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}
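
/*
 * Editorial sketch (not part of the original file): the typical lifecycle of
 * a perf_session__new() caller. Error handling follows the ERR_PTR
 * convention used above; the function and variable names are hypothetical,
 * and the block is guarded out with #if 0 so it is never built.
 */
#if 0
static int example_process_file(struct perf_data *data, struct perf_tool *tool)
{
	struct perf_session *session = perf_session__new(data, false, tool);
	int err;

	if (IS_ERR(session))
		return PTR_ERR(session);	/* e.g. -ENOMEM or an open failure */

	err = perf_session__process_events(session);	/* drives tool callbacks */
	perf_session__delete(session);
	return err;
}
#endif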

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}
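
/*
 * Editorial sketch (not part of the original file): a consumer typically
 * zero-initializes a struct perf_tool, fills in only the callbacks it cares
 * about and lets perf_tool__fill_defaults() stub out the rest. Names are
 * hypothetical and the block is guarded out with #if 0.
 */
#if 0
static int example_sample(struct perf_tool *tool __maybe_unused,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample,
			  struct evsel *evsel __maybe_unused,
			  struct machine *machine __maybe_unused)
{
	pr_debug("sample: pid=%d time=%" PRIu64 "\n", sample->pid, sample->time);
	return 0;
}

static void example_setup(struct perf_tool *tool)
{
	memset(tool, 0, sizeof(*tool));
	tool->sample = example_sample;	/* the only callback we implement */
	tool->ordered_events = true;	/* deliver samples in timestamp order */
	perf_tool__fill_defaults(tool);	/* everything else becomes a stub */
}
#endif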

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size   = bswap_64(event->aux.aux_size);
	event->aux.flags      = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid	 = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid	 = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid		= bswap_32(event->namespaces.pid);
	event->namespaces.tid		= bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate data file FEAT_ section.
 * Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
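
/*
 * Editorial note: as a worked example, revbyte(0xb4) == 0x2d, i.e. binary
 * 10110100 becomes 00101101 -- bit i moves to bit 7 - i, the per-byte
 * reversal the comment above calls for.
 */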

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
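
/*
 * Editorial note: bswap_safe() above only touches fields lying within the
 * attr->size recorded in the file, so attrs written by an older perf with a
 * smaller struct perf_event_attr are swapped without reading past the end
 * of what was actually stored.
 */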

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size      = bswap_64(event->auxtrace.size);
	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id     = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu    = bswap_32(event->stat.cpu);
	event->stat.val    = bswap_64(event->stat.val);
	event->stat.ena    = bswap_64(event->stat.ena);
	event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the kernel call chain length,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are register pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for the call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
		!callstack ? "... branch stack" : "... branch callstack",
		sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
				i, e->from, e->to,
				(unsigned short)e->flags.cycles,
				e->flags.mispred ? "M" : " ",
				e->flags.predicted ? "P" : " ",
				e->flags.abort ? "A" : " ",
				e->flags.in_tx ? "T" : " ",
				(unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
				i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union  perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in proper
		 * place already.
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}
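
/*
 * Editorial sketch (not part of the original file): a minimal callback for
 * perf_session__peek_events() that counts events in a file region. Names
 * are hypothetical, the signature is assumed to match peek_events_cb_t as
 * declared in session.h, and the block is guarded out with #if 0.
 */
#if 0
static int example_count_cb(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    u64 offset __maybe_unused, void *data)
{
	(*(u64 *)data)++;	/* a non-zero return would abort the walk */
	return 0;
}

/* usage: u64 n = 0; perf_session__peek_events(session, off, sz, example_count_cb, &n); */
#endif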

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

1733
void perf_event_header__bswap(struct perf_event_header *hdr)
1734
{
1735 1736 1737
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
1738 1739
}

1740 1741
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
1742
	return machine__findnew_thread(&session->machines.host, -1, pid);
1743 1744
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

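/*
 * Warn about out of order events, unless any evsel was recorded with
 * write_backward, where out of order timestamps are expected.
 */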
static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

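/*
 * Summarize the statistics gathered while processing and warn the user
 * about lost events, dropped samples, AUX data loss or gaps, unknown
 * events, invalid callchains and unprocessable samples.
 */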
static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
		                       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
		            "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

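/* Flush any call stack still pending on each thread of every machine. */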
static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

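/*
 * Process events from a pipe: the data cannot be mmapped or seeked, so
 * each event header is read into a heap buffer that is grown on demand
 * before the payload is read and the event is processed.
 */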
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

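/*
 * Return a pointer to the event at 'head' if it lies entirely inside the
 * buffer.  Returns NULL when not even the header fits, and 'error' when
 * the header claims a size that runs past the buffer end: the mmap
 * variant below treats that as fatal (ERR_PTR), the decomp variant just
 * stops (NULL).
 */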
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);

	return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

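/*
 * Process the events sitting in the most recently decompressed buffer
 * (session->decomp_last), advancing decomp->head past each one.
 */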
static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
				decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

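/*
 * Abstraction over the on-disk data section: a file descriptor, the
 * offset/size of the data, and a callback used to process each event.
 */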
struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
};

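/*
 * mmap the data section in up to NUM_MMAPS sliding windows of MMAP_SIZE
 * and feed every event to rd->process(), remapping whenever an event
 * crosses the end of the current window.
 */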
static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

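/* Default reader callback: feed the event into the normal processing path. */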
static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

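/*
 * Process a seekable perf.data file: run the reader over the data
 * section, then do the final ordered events, auxtrace and thread stack
 * flushes.
 */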
static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

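/*
 * Entry point for event processing: register the idle thread, then pick
 * the pipe based or the mmap based path depending on the data source.
 */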
int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

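/*
 * Remember a reference symbol (name truncated at any ']') and its address
 * in the map's kmap, for later use when relocating kernel symbols.
 */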
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

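/*
 * Build a bitmap of the CPUs listed in 'cpu_list'.  Fails if the recorded
 * events do not carry PERF_SAMPLE_CPU or if a requested CPU exceeds the
 * number of CPUs recorded in the header (capped at MAX_NR_CPUS).
 */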
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

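/*
 * Process a PERF_RECORD_ID_INDEX event: copy the idx/cpu/tid of each
 * entry into the corresponding perf_sample_id in the evlist.
 */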
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout,	" ... id: %"PRI_lu64, e->id);
			fprintf(stdout,	"  idx: %"PRI_lu64, e->idx);
			fprintf(stdout,	"  cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout,	"  tid: %"PRI_ld64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}