#include <linux/types.h>
#include <sys/mman.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"

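/*
 * Human-readable names for the perf_event record types, indexed by
 * PERF_RECORD_*.  perf_event__name() maps out-of-range ids to "INVALID"
 * and holes in the table to "UNKNOWN".
 */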
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

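/*
 * Dummy sample passed to the process() callback for synthesized events,
 * which carry no real sample data: most fields are -1 and the period is 1.
 */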
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

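/*
 * Read /proc/<pid>/status, copying the "Name:" line into comm and
 * returning the "Tgid:" value.  Returns 0 if the status file cannot be
 * opened and -1 if no Tgid: line was parsed.
 */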
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	pid_t tgid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!comm[0] || (tgid < 0)) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and pgid, malformed %s\n",
				   filename);
			break;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
			comm[size] = '\0';

		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = atoi(tgids);
		}
	}

	fclose(fp);

	return tgid;
}

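/*
 * Synthesize a PERF_RECORD_COMM event for pid.  For host machines the
 * comm and thread group id come from /proc, for guest machines the
 * machine's pid is used as the tgid.  Returns the thread group id, or -1
 * if it could not be determined or the process() callback failed.
 */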
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t size;
	pid_t tgid;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine))
		tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));
	else
		tgid = machine->pid;

	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

out:
	return tgid;
}

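/*
 * Synthesize a PERF_RECORD_FORK event attaching thread pid to thread
 * group leader tgid.
 */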
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event, pid_t pid,
				       pid_t tgid, perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/* this is really a clone event but we use fork to synthesize it */
	event->fork.ppid = tgid;
	event->fork.ptid = tgid;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return 0;
}

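/*
 * Parse /proc/<pid>/maps and synthesize one PERF_RECORD_MMAP event per
 * executable mapping (plus readable data mappings when mmap_data is set).
 * Anonymous mappings are reported with the "//anon" filename.
 */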
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
		       &event->mmap.start, &event->mmap.len, prot,
		       &event->mmap.pgoff,
		       execname);
		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 4)
			continue;
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap.len -= event->mmap.start;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.pid = tgid;
		event->mmap.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	fclose(fp);
	return rc;
}

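/*
 * Walk the machine's kernel function maps and synthesize a
 * PERF_RECORD_MMAP event for each loaded module; the kernel map itself
 * is skipped and handled by perf_event__synthesize_kernel_mmap().
 */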
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

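/*
 * Synthesize the events describing a single process: when 'full' is zero
 * only a COMM + MMAP pair for the passed in pid is emitted, otherwise
 * every task in /proc/<pid>/task gets a COMM event, with MMAP events for
 * pid itself and FORK events for the other tids.
 */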
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		int rc = 0;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
						   process, machine);
		if (tgid == -1)
			return -1;

		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
		} else {
			/* only fork the tid's map, to save time */
			rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						 process, machine);
		}

		if (rc)
			return rc;
	}

	closedir(tasks);
	return 0;
}

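/*
 * Synthesize COMM and MMAP events for every thread in 'threads'.  If a
 * thread's group leader is not part of the map, events for the leader
 * are synthesized as well.
 */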
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       threads->map[thread], 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}

	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

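/*
 * Synthesize COMM, MMAP and FORK events for every process found under
 * <root_dir>/proc.  Races with exiting tasks are ignored, see the
 * comment in the loop below.
 */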
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

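/*
 * Helpers for kallsyms__get_function_start(): find_symbol_cb() is the
 * kallsyms__parse() callback that records the address of the requested
 * symbol in struct process_symbol_args.
 */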
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

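/*
 * Synthesize the PERF_RECORD_MMAP event for the kernel text mapping
 * itself, using the machine's vmlinux map: the ref_reloc_sym address is
 * used as the pgoff and its name is appended to the mmap filename.
 */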
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map;
	struct kmap *kmap;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

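/*
 * fprintf helpers and thin perf_tool callbacks that forward each event
 * type to the corresponding machine__process_*_event() handler.
 */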
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

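/*
 * Resolve addr for thread into a map in al.  cpumode selects between the
 * thread's map groups and the machine's kernel maps and sets al->level
 * ('k', '.', 'g', 'u' or 'H'); events for modes this perf session is not
 * interested in are marked filtered instead.
 */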
void thread__find_addr_map(struct thread *thread,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	else
		al->sym = NULL;
}

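/*
 * Resolve a sample's ip to a thread, map and symbol in al, creating the
 * kernel maps for old perf.data files if needed, and apply the thread,
 * dso and symbol filters.
 */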
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}