event.c 23.6 KB
Newer Older
1
#include <linux/types.h>
2
#include <sys/mman.h>
3 4
#include "event.h"
#include "debug.h"
5
#include "hist.h"
6
#include "machine.h"
7
#include "sort.h"
8
#include "string.h"
9
#include "strlist.h"
10
#include "thread.h"
11
#include "thread_map.h"
12
#include "symbol/kallsyms.h"
13

14
static const char *perf_event__names[] = {
15 16
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
17
	[PERF_RECORD_MMAP2]			= "MMAP2",
18 19 20 21 22 23 24 25 26 27 28 29 30
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
A
Adrian Hunter 已提交
31
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
32 33
};

34
const char *perf_event__name(unsigned int id)
35
{
36
	if (id >= ARRAY_SIZE(perf_event__names))
37
		return "INVALID";
38
	if (!perf_event__names[id])
39
		return "UNKNOWN";
40
	return perf_event__names[id];
41 42
}

43
/*
 * Template sample attached to every synthesized event: -1 in the id
 * fields means "not applicable", and a nominal period of 1 is used.
 */
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

52
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
53 54 55 56 57
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
58
	pid_t tgid = -1;
59 60 61 62 63 64 65 66 67

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

68
	while (!comm[0] || (tgid < 0)) {
69
		if (fgets(bf, sizeof(bf), fp) == NULL) {
70 71 72
			pr_warning("couldn't get COMM and pgid, malformed %s\n",
				   filename);
			break;
73
		}
74 75 76 77 78 79

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
80 81 82
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
83
			comm[size] = '\0';
84

85 86 87 88
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
89
			tgid = atoi(tgids);
90 91 92
		}
	}

93 94 95 96 97
	fclose(fp);

	return tgid;
}

98
/*
 * Fill in @event as a PERF_RECORD_COMM for thread @pid without sending
 * it; callers deliver it via their process() callback.  The header
 * size is trimmed so only the used, u64-aligned part of the comm
 * string (plus the machine's id header) is transmitted.
 *
 * Returns the thread group id (machine->pid for guests), or a negative
 * value if it couldn't be resolved.
 */
static pid_t perf_event__prepare_comm(union perf_event *event, pid_t pid,
					 struct machine *machine)
{
	size_t size;
	pid_t tgid;

	/* get_comm_tgid() relies on the comm buffer being zeroed. */
	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine))
		tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));
	else
		tgid = machine->pid;

	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/* Align the used comm length and zero the id-header tail. */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;
out:
	return tgid;
}

/*
 * Build and deliver a PERF_RECORD_COMM event for @pid.
 * Returns the thread group id on success, -1 on failure.
 */
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid = perf_event__prepare_comm(event, pid, machine);

	if (tgid == -1)
		return -1;

	return process(tool, event, &synth_sample, machine) == 0 ? tgid : -1;
}

146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166
/*
 * Build and deliver a PERF_RECORD_FORK event describing thread @pid of
 * thread group @tgid.  Returns 0 on success, -1 on failure.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event, pid_t pid,
				       pid_t tgid, perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.size = sizeof(event->fork) + machine->id_hdr_size;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.ppid = tgid;
	event->fork.ptid = tgid;

	return process(tool, event, &synth_sample, machine) != 0 ? -1 : 0;
}

167 168 169 170 171 172
/*
 * Synthesize a PERF_RECORD_MMAP2 event for every mapping of @pid by
 * parsing /proc/<pid>/maps.  Data (non-executable) mappings are only
 * emitted when @mmap_data is set.
 *
 * Returns 0 on success, -1 if the maps file couldn't be opened or if
 * delivering an event failed.
 */
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		/*
		 * %4s bounds the permission field so a malformed line
		 * can't overflow prot[5] (a valid field is exactly 4
		 * chars, e.g. "r-xp").
		 * NOTE(review): execname[PATH_MAX] can be smaller than
		 * bf[BUFSIZ]; an overlong path line could still
		 * overflow it - consider bounding that conversion too.
		 */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %4s %"PRIx64" %x:%x %u %s\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		/* Skip non-executable maps unless data maps were requested. */
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		/*
		 * /proc/<pid>/maps gives [start, end); convert end to a
		 * length.  Use the mmap2 member consistently instead of
		 * relying on mmap/mmap2 sharing the same union layout.
		 */
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	fclose(fp);
	return rc;
}

279
int perf_event__synthesize_modules(struct perf_tool *tool,
280
				   perf_event__handler_t process,
281
				   struct machine *machine)
282
{
283
	int rc = 0;
284
	struct rb_node *nd;
285
	struct map_groups *kmaps = &machine->kmaps;
286
	union perf_event *event = zalloc((sizeof(event->mmap) +
287
					  machine->id_hdr_size));
288 289 290 291 292 293 294
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
295

296 297 298 299
	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
300
	if (machine__is_host(machine))
301
		event->header.misc = PERF_RECORD_MISC_KERNEL;
302
	else
303
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
304 305

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
306 307 308 309 310 311 312
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

313
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
314 315 316
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
317 318
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
319 320 321 322 323
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
324
		       pos->dso->long_name_len + 1);
325 326 327 328
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
329 330
	}

331
	free(event);
332
	return rc;
333 334
}

335 336
/*
 * Synthesize events describing @pid.  With @full == 0 only a comm
 * event and the thread's mmap events are sent.  Otherwise every task
 * in /proc/<pid>/task gets fork + comm events, and the group leader's
 * maps are synthesized as well.
 *
 * Returns 0 on success, negative on failure.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
					  perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		/*
		 * Break (instead of the old early returns) on failure so
		 * the directory stream is not leaked.
		 */
		rc = -1;
		tgid = perf_event__prepare_comm(comm_event, _pid, machine);
		if (tgid == -1)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (process(tool, comm_event, &synth_sample, machine) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
		}

		if (rc)
			break;
	}

	closedir(tasks);
	return rc;
}

408
int perf_event__synthesize_thread_map(struct perf_tool *tool,
409
				      struct thread_map *threads,
410
				      perf_event__handler_t process,
411 412
				      struct machine *machine,
				      bool mmap_data)
413
{
414
	union perf_event *comm_event, *mmap_event, *fork_event;
415
	int err = -1, thread, j;
416

417
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
418 419 420
	if (comm_event == NULL)
		goto out;

421
	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
422 423 424
	if (mmap_event == NULL)
		goto out_free_comm;

425 426 427 428
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

429 430 431
	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
432
					       fork_event,
433
					       threads->map[thread], 0,
434 435
					       process, tool, machine,
					       mmap_data)) {
436 437 438
			err = -1;
			break;
		}
439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
457
			    __event__synthesize_thread(comm_event, mmap_event,
458
						       fork_event,
459 460 461
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
462 463 464 465
				err = -1;
				break;
			}
		}
466
	}
467 468
	free(fork_event);
out_free_mmap:
469 470 471 472 473 474 475
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

476
int perf_event__synthesize_threads(struct perf_tool *tool,
477
				   perf_event__handler_t process,
478
				   struct machine *machine, bool mmap_data)
479 480
{
	DIR *proc;
481
	char proc_path[PATH_MAX];
482
	struct dirent dirent, *next;
483
	union perf_event *comm_event, *mmap_event, *fork_event;
484 485
	int err = -1;

486 487 488
	if (machine__is_default_guest(machine))
		return 0;

489
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
490 491 492
	if (comm_event == NULL)
		goto out;

493
	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
494 495
	if (mmap_event == NULL)
		goto out_free_comm;
496

497 498 499 500
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

501 502 503
	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

504
	if (proc == NULL)
505
		goto out_free_fork;
506 507 508 509 510 511 512

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
513 514 515 516
		/*
 		 * We may race with exiting thread, so don't stop just because
 		 * one thread couldn't be synthesized.
 		 */
517 518
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data);
519 520
	}

521
	err = 0;
522
	closedir(proc);
523 524
out_free_fork:
	free(fork_event);
525 526 527 528 529 530
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
531
}
532

533 534 535 536 537
/*
 * Closure for kallsyms__parse(): the symbol to look for and, on
 * success, the address it was found at.
 */
struct process_symbol_args {
	const char *name;	/* symbol name to match */
	u64	   start;	/* filled in by find_symbol_cb() */
};

538
static int find_symbol_cb(void *arg, const char *name, char type,
539
			  u64 start)
540 541 542
{
	struct process_symbol_args *args = arg;

543 544 545 546 547 548
	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
549 550 551 552 553 554
		return 0;

	args->start = start;
	return 1;
}

555 556 557 558 559 560 561 562 563 564 565
/*
 * Look up @symbol_name in @kallsyms_filename and return its start
 * address, or 0 if the file couldn't be parsed or the symbol wasn't
 * found.
 */
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	return kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) > 0 ?
	       args.start : 0;
}

566
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
567
				       perf_event__handler_t process,
568
				       struct machine *machine)
569 570
{
	size_t size;
571
	const char *mmap_name;
572 573
	char name_buff[PATH_MAX];
	struct map *map;
574
	struct kmap *kmap;
575
	int err;
576 577 578 579 580
	union perf_event *event;

	if (machine->vmlinux_maps[0] == NULL)
		return -1;

581 582 583 584 585
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
586
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
587 588 589 590 591
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}
592

593
	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
594
	if (machine__is_host(machine)) {
595 596 597 598
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
599
		event->header.misc = PERF_RECORD_MISC_KERNEL;
600
	} else {
601
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
602
	}
603

604
	map = machine->vmlinux_maps[MAP__FUNCTION];
605
	kmap = map__kmap(map);
606
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
607
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
608
	size = PERF_ALIGN(size, sizeof(u64));
609 610
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
611
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
612
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
613 614 615 616
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

617
	err = process(tool, event, &synth_sample, machine);
618 619 620
	free(event);

	return err;
621 622
}

623 624
/*
 * Print a COMM event to @fp (" exec" is appended to the prefix when
 * the event came from an exec).  Returns the number of characters
 * written.
 */
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s = (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) ?
			" exec" : "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

635
/* Default PERF_RECORD_COMM handler: delegate to the machine layer. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

643
/* Default PERF_RECORD_LOST handler: delegate to the machine layer. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
650

651 652
/*
 * Print an MMAP event to @fp; the flag char is 'r' for data mappings
 * (MISC_MMAP_DATA set) and 'x' for executable ones.  Returns the
 * number of characters written.
 */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

660 661 662
/*
 * Print an MMAP2 event to @fp, including device, inode and the
 * "rwxp"-style protection/flags characters.  Returns the number of
 * characters written.
 */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

675
/* Default PERF_RECORD_MMAP handler: delegate to the machine layer. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

683 684
/* Default PERF_RECORD_MMAP2 handler: delegate to the machine layer. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

691 692 693 694 695 696 697
/*
 * Print a FORK/EXIT event as "(pid:tid):(ppid:ptid)".  Returns the
 * number of characters written.
 */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

698
/* Default PERF_RECORD_FORK handler: delegate to the machine layer. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}
705

706 707
/* Default PERF_RECORD_EXIT handler: delegate to the machine layer. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
730 731 732
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
733 734 735 736 737 738 739
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

740 741
/* Generic event handler: dispatch any record type via the machine layer. */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

748
/*
 * Resolve @addr for @thread into a map in @al, selecting the map
 * groups to search from @cpumode (kernel vs user, host vs guest).
 * On exit al->map is NULL if nothing matched; otherwise al->addr has
 * been converted to a map-relative address.  Filter bits are set in
 * al->filtered for samples from sides (host/guest) we're not
 * profiling.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	/* Pick the map groups and level char for this cpumode. */
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* Sample from a side we're not interested in: filter it. */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

824
void thread__find_addr_location(struct thread *thread,
825
				u8 cpumode, enum map_type type, u64 addr,
826
				struct addr_location *al)
827
{
828
	thread__find_addr_map(thread, cpumode, type, addr, al);
829
	if (al->map != NULL)
830
		al->sym = map__find_symbol(al->map, al->addr,
831
					   thread->mg->machine->symbol_filter);
832 833
	else
		al->sym = NULL;
834 835
}

836
/*
 * Resolve @sample's thread, map and symbol into @al, applying the
 * thread/dso/symbol list filters from symbol_conf (matches set bits in
 * al->filtered rather than dropping the sample).
 *
 * Returns 0 on success, -1 if the thread couldn't be found/created.
 */
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		/* Filter by dso list: match either the short or long name. */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	/* Filter by symbol list (also filters samples with no symbol). */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923

/*
 * A BTS (Branch Trace Store) session is modelled as a hardware
 * branch-instructions event with a sample period of 1.
 *
 * Note: the config check used to be a bitwise AND, which also matched
 * unrelated events whose config value merely shares bits with
 * PERF_COUNT_HW_BRANCH_INSTRUCTIONS (e.g. branch-misses); compare for
 * equality instead, since PERF_COUNT_HW_* are enum values, not flags.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

/*
 * Can sample->addr for this event type be correlated with a symbol?
 * True for the page-fault software events and for BTS events.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (is_bts_event(attr))
		return true;

	return attr->type == PERF_TYPE_SOFTWARE &&
	       (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
		attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ);
}

void perf_event__preprocess_sample_addr(union perf_event *event,
					struct perf_sample *sample,
					struct thread *thread,
					struct addr_location *al)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

924
	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
925
	if (!al->map)
926
		thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
927 928 929 930 931 932 933 934
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}