#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

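/*
 * Read the short command name and thread group id of @pid from
 * /proc/<pid>/status, which contains lines like:
 *
 *	Name:	cat
 *	Tgid:	1234
 *
 * Returns the tgid, 0 if the file couldn't be opened (e.g. the task
 * already exited), or -1 if it couldn't be parsed.
 */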
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	pid_t tgid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!comm[0] || (tgid < 0)) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and tgid, malformed %s\n",
				   filename);
			break;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
			comm[size] = '\0';

		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = atoi(tgids);
		}
	}

	fclose(fp);

	return tgid;
}

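/*
 * Synthesize a PERF_RECORD_COMM event for @pid and hand it to @process.
 * Returns the thread group id @pid belongs to, or -1 if delivering the
 * event failed.
 */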
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t size;
	pid_t tgid;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine))
		tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));
	else
		tgid = machine->pid;

	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

out:
	return tgid;
}

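/*
 * Synthesize a minimal PERF_RECORD_FORK for @pid in thread group @tgid,
 * so the thread can share its group leader's maps instead of having them
 * re-synthesized from /proc.
 */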
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event, pid_t pid,
				       pid_t tgid, perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/* this is really a clone event but we use fork to synthesize it */
	event->fork.ppid = tgid;
	event->fork.ptid = tgid;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return 0;
}

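/*
 * Parse <root_dir>/proc/<pid>/maps and synthesize one PERF_RECORD_MMAP
 * per executable mapping; with @mmap_data set, readable data mappings
 * are emitted as well and flagged with PERF_RECORD_MISC_MMAP_DATA.
 */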
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
		       &event->mmap.start, &event->mmap.len, prot,
		       &event->mmap.pgoff,
		       execname);
		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 4)
			continue;
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap.len -= event->mmap.start;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.pid = tgid;
		event->mmap.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	fclose(fp);
	return rc;
}

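/*
 * Synthesize one PERF_RECORD_MMAP per kernel module in @machine's kernel
 * map group; the kernel image itself is skipped here and handled by
 * perf_event__synthesize_kernel_mmap().
 */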
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

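/*
 * Synthesize the events describing one process: with @full set, walk
 * /proc/<pid>/task emitting a COMM per task, the MMAPs of the group
 * leader and a FORK for every other thread; otherwise emit only the
 * COMM and MMAPs of @pid itself.
 */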
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		int rc = 0;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
						   process, machine);
		if (tgid == -1) {
			closedir(tasks);
			return -1;
		}

		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
		} else {
			/* only fork the tid's map, to save time */
			rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						 process, machine);
		}

		if (rc) {
			closedir(tasks);
			return rc;
		}
	}

	closedir(tasks);
	return 0;
}

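/*
 * Synthesize events for every thread in @threads.  If a thread's group
 * leader is not itself in the map, events for the leader are synthesized
 * too, so that its maps can be found later.
 */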
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       threads->map[thread], 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}

	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

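/*
 * Walk <root_dir>/proc and synthesize COMM/MMAP/FORK events for every
 * task already running on @machine.
 */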
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	if (machine__is_default_guest(machine)) {
		err = 0;
		goto out_free_fork;
	}

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

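/*
 * Argument/result bundle for find_symbol_cb(), the kallsyms__parse()
 * callback used to look up the start address of a single symbol.
 */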
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

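/*
 * Synthesize a PERF_RECORD_MMAP for the kernel image itself, named after
 * machine__mmap_name() (e.g. "[kernel.kallsyms]") plus the ref_reloc_sym,
 * so that kernel addresses can be resolved and relocated later.
 */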
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map;
	struct kmap *kmap;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap2.filename);
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

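/*
 * Resolve @addr for @thread to a map in @al, picking the kernel or user
 * map group based on @cpumode and recording the level in al->level
 * ('k' kernel, '.' user, 'g'/'u' guest kernel/user, 'H' otherwise).
 */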
void thread__find_addr_map(struct thread *thread,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &thread->mg;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	else
		al->sym = NULL;
}

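/*
 * Fill in @al (thread, map, symbol, ...) for @sample and apply the
 * comm/dso/symbol filter lists; filtered samples are only marked with
 * al->filtered, not dropped.
 */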
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->pid);

	if (thread == NULL)
		return -1;

	if (thread__is_filtered(thread))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name)))))
			goto out_filtered;

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name)))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}