#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

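/*
 * Template sample attached to synthesized events: all fields are "unknown"
 * (-1) except for the period, so the process callbacks have something sane
 * to consume.
 */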
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

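/*
 * Read the comm and thread group id for @pid from /proc/<pid>/status,
 * copying at most @len - 1 bytes of the name into @comm.  Returns the tgid
 * (-1 if it could not be parsed), or 0 if the status file could not be
 * opened.
 */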
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	pid_t tgid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!comm[0] || (tgid < 0)) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and tgid, malformed %s\n",
				   filename);
			break;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
			comm[size] = '\0';

		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = atoi(tgids);
		}
	}

	fclose(fp);

	return tgid;
}

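/*
 * Synthesize a PERF_RECORD_COMM for @pid and hand it to @process.  On the
 * host the comm and tgid come from /proc; for guest machines the machine's
 * pid is used as the tgid.  Returns the tgid, or -1 if @process failed.
 */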
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t size;
	pid_t tgid;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine))
		tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));
	else
		tgid = machine->pid;

	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

out:
	return tgid;
}

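/*
 * Synthesize a PERF_RECORD_FORK that attaches thread @pid to its thread
 * group leader @tgid, so the tid ends up sharing the leader's maps.
 */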
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event, pid_t pid,
				       pid_t tgid, perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/* this is really a clone event but we use fork to synthesize it */
	event->fork.ppid = tgid;
	event->fork.ptid = tgid;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return 0;
}

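/*
 * Parse /proc/<pid>/maps and synthesize a PERF_RECORD_MMAP for every
 * executable mapping (and, when @mmap_data is set, for readable data
 * mappings as well), feeding each event to @process.
 */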
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
		       &event->mmap.start, &event->mmap.len, prot,
		       &event->mmap.pgoff,
		       execname);
		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 4)
			continue;
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap.len -= event->mmap.start;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.pid = tgid;
		event->mmap.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	fclose(fp);
	return rc;
}

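/*
 * Walk the machine's kernel map group and synthesize a PERF_RECORD_MMAP for
 * every module map (the kernel map itself is skipped here, it is handled by
 * perf_event__synthesize_kernel_mmap()).
 */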
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

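/*
 * Synthesize events for one task.  When @full is zero only a COMM plus the
 * maps of @pid are emitted; otherwise every tid in /proc/<pid>/task gets a
 * COMM, with the maps synthesized once for @pid itself and a FORK record
 * for the other threads.
 */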
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		int rc = 0;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
						   process, machine);
		if (tgid == -1)
			return -1;

		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
		} else {
			/* only fork the tid's map, to save time */
			rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						 process, machine);
		}

		if (rc)
			return rc;
	}

	closedir(tasks);
	return 0;
}

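/*
 * Synthesize COMM/MMAP events for every thread in @threads.  If a thread's
 * group leader is not part of the map, events are generated for the leader
 * too so that its maps are available.
 */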
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       threads->map[thread], 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}

	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

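/*
 * Synthesize COMM/MMAP/FORK events for every task currently listed under
 * /proc on the machine, typically used by system-wide sessions to pick up
 * the already running tasks.
 */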
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

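/* Look up @symbol_name in @kallsyms_filename; returns 0 if it is not found. */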
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

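/*
 * Synthesize the PERF_RECORD_MMAP for the kernel text mapping itself,
 * anchored at the ref_reloc_sym (e.g. _text) of the machine's vmlinux map.
 */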
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map;
	struct kmap *kmap;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but until that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap2.filename);
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

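/*
 * Resolve @addr for @thread: pick the map group matching @cpumode, find the
 * map containing the address and convert al->addr with the map's map_ip().
 * cpumodes that cannot be resolved are flagged in al->filtered.
 */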
void thread__find_addr_map(struct thread *thread,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &thread->mg;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

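/*
 * Like thread__find_addr_map(), but also look up the symbol within the map
 * that was found (al->sym is NULL when no map matches).
 */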
void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	else
		al->sym = NULL;
}

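/*
 * Common per-sample setup for the analysis tools: find the thread, resolve
 * sample->ip to a map and symbol, and apply the dso/symbol/thread filters
 * from symbol_conf.
 */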
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->pid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}