#include <linux/types.h>
#include <sys/mman.h>

#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"

static const char *perf_event__names[] = {
15 16
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
17
	[PERF_RECORD_MMAP2]			= "MMAP2",
18 19 20 21 22 23 24 25 26 27 28 29 30
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
A
Adrian Hunter 已提交
31
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
32 33
};

34
const char *perf_event__name(unsigned int id)
35
{
36
	if (id >= ARRAY_SIZE(perf_event__names))
37
		return "INVALID";
38
	if (!perf_event__names[id])
39
		return "UNKNOWN";
40
	return perf_event__names[id];
41 42
}

43
static struct perf_sample synth_sample = {
44 45 46 47 48 49 50 51
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

52
/*
 * perf_event__get_comm_tgid - read a task's command name and thread
 * group id from /proc/<pid>/status.
 *
 * @pid:  task to look up
 * @comm: output buffer for the command name; must be zero-initialized
 *        by the caller, since the parse loop keys off comm[0]
 * @len:  size of @comm
 *
 * Returns the tgid, or -1 when it could not be parsed.  Returns 0 when
 * /proc/<pid>/status cannot be opened (task already exited).
 */
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	pid_t tgid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	/* keep reading until both the Name: and Tgid: fields were seen */
	while (!comm[0] || (tgid < 0)) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and tgid, malformed %s\n",
				   filename);
			break;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name);
			/* strip the trailing newline fgets() kept, guarding
			 * against an empty Name: field (no underflow) */
			if (size > 0 && name[size - 1] == '\n')
				--size;
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
			comm[size] = '\0';

		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = atoi(tgids);
		}
	}

	fclose(fp);

	return tgid;
}

/*
 * perf_event__synthesize_comm - cook up a PERF_RECORD_COMM event for
 * @pid and feed it to the @process callback.
 *
 * Returns the thread group id of @pid (a negative tgid means it could
 * not be determined and no event was emitted), or -1 when @process
 * fails.
 */
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t size;
	pid_t tgid;

	memset(&event->comm, 0, sizeof(event->comm));

	/* guests have no /proc of their own to parse, use the machine pid */
	if (machine__is_host(machine))
		tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));
	else
		tgid = machine->pid;

	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/* size the event to the u64-aligned comm plus the sample id header */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

out:
	return tgid;
}

/*
 * perf_event__synthesize_fork - cook up a PERF_RECORD_FORK event for
 * thread @pid belonging to thread group @tgid and feed it to @process.
 *
 * Returns 0 on success, -1 when @process fails.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event, pid_t pid,
				       pid_t tgid, perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/* this is really a clone event but we use fork to synthesize it */
	event->fork.ppid = tgid;
	event->fork.ptid = tgid;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return 0;
}

157 158 159 160 161 162
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
163 164 165
{
	char filename[PATH_MAX];
	FILE *fp;
166
	int rc = 0;
167

168 169 170
	if (machine__is_default_guest(machine))
		return 0;

171 172
	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);
173 174 175 176 177 178 179 180 181 182

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

183
	event->header.type = PERF_RECORD_MMAP2;
184

185
	while (1) {
186 187 188 189
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
190
		unsigned int ino;
191
		size_t size;
192
		ssize_t n;
193

194 195 196
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

197 198 199
		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

200
		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
201 202 203 204 205 206
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

207 208 209
		/*
 		 * Anon maps don't have the execname.
 		 */
210
		if (n < 7)
211
			continue;
212 213 214

		event->mmap2.ino = (u64)ino;

215 216 217
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
218 219 220 221
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
222

223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

238 239 240 241 242 243
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}
244 245 246 247 248

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
249
		memcpy(event->mmap2.filename, execname, size);
250
		size = PERF_ALIGN(size, sizeof(u64));
251 252 253 254 255 256 257
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;
258 259 260 261

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
262 263 264 265
		}
	}

	fclose(fp);
266
	return rc;
267 268
}

269
int perf_event__synthesize_modules(struct perf_tool *tool,
270
				   perf_event__handler_t process,
271
				   struct machine *machine)
272
{
273
	int rc = 0;
274
	struct rb_node *nd;
275
	struct map_groups *kmaps = &machine->kmaps;
276
	union perf_event *event = zalloc((sizeof(event->mmap) +
277
					  machine->id_hdr_size));
278 279 280 281 282 283 284
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
285

286 287 288 289
	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
290
	if (machine__is_host(machine))
291
		event->header.misc = PERF_RECORD_MISC_KERNEL;
292
	else
293
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
294 295

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
296 297 298 299 300 301 302
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

303
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
304 305 306
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
307 308
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
309 310 311 312 313
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
314
		       pos->dso->long_name_len + 1);
315 316 317 318
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
319 320
	}

321
	free(event);
322
	return rc;
323 324
}

325 326
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
327
				      union perf_event *fork_event,
328 329
				      pid_t pid, int full,
					  perf_event__handler_t process,
330
				      struct perf_tool *tool,
331
				      struct machine *machine, bool mmap_data)
332
{
333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		int rc = 0;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
						   process, machine);
		if (tgid == -1)
			return -1;

376 377 378 379 380 381 382 383 384
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
		} else {
			/* only fork the tid's map, to save time */
			rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						 process, machine);
		}
385 386 387 388 389 390 391

		if (rc)
			return rc;
	}

	closedir(tasks);
	return 0;
392 393
}

394
int perf_event__synthesize_thread_map(struct perf_tool *tool,
395
				      struct thread_map *threads,
396
				      perf_event__handler_t process,
397 398
				      struct machine *machine,
				      bool mmap_data)
399
{
400
	union perf_event *comm_event, *mmap_event, *fork_event;
401
	int err = -1, thread, j;
402

403
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
404 405 406
	if (comm_event == NULL)
		goto out;

407
	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
408 409 410
	if (mmap_event == NULL)
		goto out_free_comm;

411 412 413 414
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

415 416 417
	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
418
					       fork_event,
419
					       threads->map[thread], 0,
420 421
					       process, tool, machine,
					       mmap_data)) {
422 423 424
			err = -1;
			break;
		}
425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
443
			    __event__synthesize_thread(comm_event, mmap_event,
444
						       fork_event,
445 446 447
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
448 449 450 451
				err = -1;
				break;
			}
		}
452
	}
453 454
	free(fork_event);
out_free_mmap:
455 456 457 458 459 460 461
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

462
int perf_event__synthesize_threads(struct perf_tool *tool,
463
				   perf_event__handler_t process,
464
				   struct machine *machine, bool mmap_data)
465 466
{
	DIR *proc;
467
	char proc_path[PATH_MAX];
468
	struct dirent dirent, *next;
469
	union perf_event *comm_event, *mmap_event, *fork_event;
470 471
	int err = -1;

472 473 474
	if (machine__is_default_guest(machine))
		return 0;

475
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
476 477 478
	if (comm_event == NULL)
		goto out;

479
	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
480 481
	if (mmap_event == NULL)
		goto out_free_comm;
482

483 484 485 486
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

487 488 489
	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

490
	if (proc == NULL)
491
		goto out_free_fork;
492 493 494 495 496 497 498

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
499 500 501 502
		/*
 		 * We may race with exiting thread, so don't stop just because
 		 * one thread couldn't be synthesized.
 		 */
503 504
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data);
505 506
	}

507
	err = 0;
508
	closedir(proc);
509 510
out_free_fork:
	free(fork_event);
511 512 513 514 515 516
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
517
}
518

519 520 521 522 523
/* Carries the wanted symbol name into find_symbol_cb() and its
 * resolved start address back out. */
struct process_symbol_args {
	const char *name;	/* symbol to look for */
	u64	   start;	/* filled in when the symbol is found */
};

524
static int find_symbol_cb(void *arg, const char *name, char type,
525
			  u64 start)
526 527 528
{
	struct process_symbol_args *args = arg;

529 530 531 532 533 534
	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
535 536 537 538 539 540
		return 0;

	args->start = start;
	return 1;
}

541 542 543 544 545 546 547 548 549 550 551
/*
 * Look up @symbol_name in @kallsyms_filename and return its start
 * address, or 0 when the symbol is absent or the file cannot be
 * parsed.
 */
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };
	int found;

	found = kallsyms__parse(kallsyms_filename, &args, find_symbol_cb);

	return found > 0 ? args.start : 0;
}

552
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
553
				       perf_event__handler_t process,
554
				       struct machine *machine)
555 556
{
	size_t size;
557
	const char *mmap_name;
558 559
	char name_buff[PATH_MAX];
	struct map *map;
560
	struct kmap *kmap;
561
	int err;
562 563 564 565 566
	union perf_event *event;

	if (machine->vmlinux_maps[0] == NULL)
		return -1;

567 568 569 570 571
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
572
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
573 574 575 576 577
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}
578

579
	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
580
	if (machine__is_host(machine)) {
581 582 583 584
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
585
		event->header.misc = PERF_RECORD_MISC_KERNEL;
586
	} else {
587
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
588
	}
589

590
	map = machine->vmlinux_maps[MAP__FUNCTION];
591
	kmap = map__kmap(map);
592
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
593
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
594
	size = PERF_ALIGN(size, sizeof(u64));
595 596
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
597
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
598
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
599 600 601 602
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

603
	err = process(tool, event, &synth_sample, machine);
604 605 606
	free(event);

	return err;
607 608
}

609 610
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
611 612 613 614 615 616 617
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

618
	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
619 620
}

621
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
622
			     union perf_event *event,
623
			     struct perf_sample *sample,
624
			     struct machine *machine)
625
{
626
	return machine__process_comm_event(machine, event, sample);
627 628
}

629
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
630
			     union perf_event *event,
631
			     struct perf_sample *sample,
632
			     struct machine *machine)
633
{
634
	return machine__process_lost_event(machine, event, sample);
635
}
636

637 638
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
639
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
640
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
641 642 643
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
644 645
}

646 647 648
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
649
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
650 651 652 653
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
654 655 656 657
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
658 659 660
		       event->mmap2.filename);
}

661
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
662
			     union perf_event *event,
663
			     struct perf_sample *sample,
664
			     struct machine *machine)
665
{
666
	return machine__process_mmap_event(machine, event, sample);
667 668
}

669 670
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
671
			     struct perf_sample *sample,
672 673
			     struct machine *machine)
{
674
	return machine__process_mmap2_event(machine, event, sample);
675 676
}

677 678 679 680 681 682 683
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

684
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
685
			     union perf_event *event,
686
			     struct perf_sample *sample,
687
			     struct machine *machine)
688
{
689
	return machine__process_fork_event(machine, event, sample);
690
}
691

692 693
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
694
			     struct perf_sample *sample,
695 696
			     struct machine *machine)
{
697
	return machine__process_exit_event(machine, event, sample);
698 699
}

700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
716 717 718
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
719 720 721 722 723 724 725
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

726 727
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
728
			struct perf_sample *sample,
729
			struct machine *machine)
730
{
731
	return machine__process_event(machine, event, sample);
732 733
}

734
void thread__find_addr_map(struct thread *thread, u8 cpumode,
735
			   enum map_type type, u64 addr,
736
			   struct addr_location *al)
737
{
738
	struct map_groups *mg = thread->mg;
739
	struct machine *machine = mg->machine;
740
	bool load_map = false;
741

742
	al->machine = machine;
743
	al->thread = thread;
744
	al->addr = addr;
745
	al->cpumode = cpumode;
746
	al->filtered = 0;
747

748 749 750 751 752
	if (machine == NULL) {
		al->map = NULL;
		return;
	}

753
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
754
		al->level = 'k';
755
		mg = &machine->kmaps;
756
		load_map = true;
757
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
758
		al->level = '.';
759 760
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
761
		mg = &machine->kmaps;
762
		load_map = true;
763 764
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
765
	} else {
766
		al->level = 'H';
767
		al->map = NULL;
768 769 770 771

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
772
			al->filtered |= (1 << HIST_FILTER__GUEST);
773 774 775
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
776
			al->filtered |= (1 << HIST_FILTER__HOST);
777

778 779 780
		return;
	}
try_again:
781
	al->map = map_groups__find(mg, type, al->addr);
782 783 784 785 786 787 788 789 790 791
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
792 793 794
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
795
			mg = &machine->kmaps;
796
			load_map = true;
797 798
			goto try_again;
		}
799 800 801 802 803 804
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
805
			map__load(al->map, machine->symbol_filter);
806
		al->addr = al->map->map_ip(al->map, al->addr);
807
	}
808 809
}

810
void thread__find_addr_location(struct thread *thread,
811
				u8 cpumode, enum map_type type, u64 addr,
812
				struct addr_location *al)
813
{
814
	thread__find_addr_map(thread, cpumode, type, addr, al);
815
	if (al->map != NULL)
816
		al->sym = map__find_symbol(al->map, al->addr,
817
					   thread->mg->machine->symbol_filter);
818 819
	else
		al->sym = NULL;
820 821
}

822
int perf_event__preprocess_sample(const union perf_event *event,
823
				  struct machine *machine,
824
				  struct addr_location *al,
825
				  struct perf_sample *sample)
826
{
827
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
828
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
829
							sample->tid);
830

831 832 833
	if (thread == NULL)
		return -1;

834
	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
835
	/*
836
	 * Have we already created the kernel maps for this machine?
837 838 839 840 841 842
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
843 844
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);
845

846
	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
847 848 849
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
850 851 852 853

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

854
	al->sym = NULL;
855
	al->cpu = sample->cpu;
856 857

	if (al->map) {
858 859
		struct dso *dso = al->map->dso;

860
		if (symbol_conf.dso_list &&
861 862 863 864
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
865 866 867
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}
868

869 870
		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
871
	}
872

873 874
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
875 876 877
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}
878 879

	return 0;
880
}
881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909

/*
 * Does this attr describe an Intel BTS-style event: a hardware
 * branch-instructions counter sampling every branch (period 1)?
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       /* NOTE(review): bitwise '&' also matches any config sharing
		* bits with PERF_COUNT_HW_BRANCH_INSTRUCTIONS; '==' looks
		* intended - confirm before changing behavior. */
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

/*
 * Can the SAMPLE_ADDR of this event type be resolved to a symbol?
 * True for page-fault software events and for BTS branch records.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE) {
		switch (attr->config) {
		case PERF_COUNT_SW_PAGE_FAULTS:
		case PERF_COUNT_SW_PAGE_FAULTS_MIN:
		case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
			return true;
		default:
			break;
		}
	}

	return is_bts_event(attr);
}

void perf_event__preprocess_sample_addr(union perf_event *event,
					struct perf_sample *sample,
					struct thread *thread,
					struct addr_location *al)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

910
	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
911
	if (!al->map)
912
		thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
913 914 915 916 917 918 919 920
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}