#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event,
					 struct perf_sample *sample)
{
	return machines__deliver_event(oe->machines, oe->evlist, event->event,
				       sample, oe->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	} else {
		ordered_events__init(&session->ordered_events, &session->machines,
				     session->evlist, tool, ordered_events__deliver_event);
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						__maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *perf_session
				 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
263 264
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_build_id_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_id_index_stub;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
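
/*
 * Illustrative note (not part of the original source): revbyte() mirrors
 * the bits within one byte, e.g. revbyte(0x01) == 0x80 and
 * revbyte(0xb1) == 0x8d, and swap_bitfield() applies it to each byte in
 * turn, which is how perf_event__attr_swap() below fixes up the attr flag
 * bits:
 *
 *	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
 */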

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      struct perf_sample *sample, u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;

	u64 timestamp = sample->time;
	struct ordered_event *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		s->evlist->stats.nr_unordered_events++;
	}

	new = ordered_events__new(oe, timestamp, event);
	if (!new) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		new = ordered_events__new(oe, timestamp, event);
	}

	if (!new)
		return -ENOMEM;

	new->file_offset = file_offset;
	return 0;
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get the user call chain,
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are register pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}
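
/*
 * Illustrative example (not part of the original source): for a sample whose
 * kernel chain is { K1, K2, PERF_CONTEXT_USER } and whose LBR stack holds
 * "C"->"D", "B"->"C", "A"->"B", the function above prints the combined
 * callstack in the order K1, K2, PERF_CONTEXT_USER, D, C, B, A.
 */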

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
 perf_evlist__deliver_sample(struct perf_evlist *evlist,
			     struct perf_tool *tool,
			     union  perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = oe->tool;
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session->evlist, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->ordered_events.tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data_file__is_pipe(session->file))
		return -1;

	fd = perf_data_file__fd(session->file);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz)
		return -1;

	rest = event->header.size - hdr_sz;

	if (readn(fd, buf + hdr_sz, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}
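
/*
 * Illustrative usage sketch (not part of the original source); "sess",
 * "offset" and the 4 KB buffer are hypothetical caller-side values:
 *
 *	union perf_event *ev;
 *	struct perf_sample sample;
 *	char buf[4096];
 *
 *	if (perf_session__peek_event(sess, offset, buf, sizeof(buf),
 *				     &ev, &sample) == 0)
 *		printf("peeked event type %u\n", ev->header.type);
 */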

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->ordered_events.tool;
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session__queue_event(session, event, &sample, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return machines__deliver_event(&session->machines, evlist, event,
				       &sample, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_tool__warn_about_errors(const struct perf_tool *tool,
					 const struct events_stats *stats)
{
	if (tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	if (stats->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", stats->nr_unordered_events);
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = oe->tool;
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
out_err:
	free(buf);
	perf_tool__warn_about_errors(tool, &session->evlist->stats);
	ordered_events__free(&session->ordered_events);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

static int __perf_session__process_events(struct perf_session *session,
					  u64 data_offset, u64 data_size,
					  u64 file_size)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = oe->tool;
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
out_err:
	ui_progress__finish();
	perf_tool__warn_about_errors(tool, &session->evlist->stats);
	ordered_events__free(&session->ordered_events);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (thread__resolve_callchain(al->thread, evsel,
					      sample, NULL, NULL,
					      PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__delete(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout,	" ... id: %"PRIu64, e->id);
			fprintf(stdout,	"  idx: %"PRIu64, e->idx);
			fprintf(stdout,	"  cpu: %"PRId64, e->cpu);
			fprintf(stdout,	"  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

1778
	evlist__for_each(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}