/* event.c - synthesize, process and print perf events */
1
#include <linux/types.h>
2
#include <linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
3
#include <api/fs/fs.h>
4 5
#include "event.h"
#include "debug.h"
6
#include "hist.h"
7
#include "machine.h"
8
#include "sort.h"
9
#include "string.h"
10
#include "strlist.h"
11
#include "thread.h"
12
#include "thread_map.h"
13
#include "symbol/kallsyms.h"
14 15
#include "asm/bug.h"
#include "stat.h"
16

17
static const char *perf_event__names[] = {
18 19
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
20
	[PERF_RECORD_MMAP2]			= "MMAP2",
21 22 23 24 25 26 27 28
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
29
	[PERF_RECORD_AUX]			= "AUX",
30
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
31
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
32 33
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
34
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
35 36 37 38 39
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
A
Adrian Hunter 已提交
40
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
41 42
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
43
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
44
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
45
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
46
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
J
Jiri Olsa 已提交
47
	[PERF_RECORD_STAT]			= "STAT",
48
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
49
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
50
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
51 52
};

53 54 55 56 57 58 59 60 61 62
/*
 * Short namespace names, indexed by the *_NS_INDEX values used in
 * PERF_RECORD_NAMESPACES link_info entries.
 */
static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};

63
const char *perf_event__name(unsigned int id)
64
{
65
	if (id >= ARRAY_SIZE(perf_event__names))
66
		return "INVALID";
67
	if (!perf_event__names[id])
68
		return "UNKNOWN";
69
	return perf_event__names[id];
70 71
}

72 73 74 75 76 77 78
/* Map a namespace index (NET_NS_INDEX, ...) to its short name. */
static const char *perf_ns__name(unsigned int id)
{
	return (id < ARRAY_SIZE(perf_ns__names)) ? perf_ns__names[id]
						 : "UNKNOWN";
}

79 80 81 82 83 84
/*
 * Deliver a synthesized event to @process.  Synthesized events carry no
 * real sample data, so fabricate a dummy sample whose only meaningful
 * field is the cpumode taken from the event header.
 */
static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

97 98
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
101 102
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
103 104
{
	char filename[PATH_MAX];
105 106
	char bf[4096];
	int fd;
107 108
	size_t size = 0;
	ssize_t n;
109 110 111 112
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;
113 114 115

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

116 117
	fd = open(filename, O_RDONLY);
	if (fd < 0) {
118
		pr_debug("couldn't open %s\n", filename);
119
		return -1;
120 121
	}

122 123 124
	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
125
		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
126 127
			   pid);
		return -1;
128
	}
129
	bf[n] = '\0';
130

131 132
	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
133
	ppids = strstr(bf, "PPid:");
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
156
		*tgid = atoi(tgids);
157 158 159
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}
160

161 162 163 164 165 166 167 168
	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
169 170
}

171 172 173
/*
 * Fill in a PERF_RECORD_COMM event for @pid.  On the host machine the
 * comm, tgid and ppid come from /proc; for a guest only the tgid (the
 * guest's pid) is known.  Returns 0 on success, -1 on failure.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t aligned_len;

	*ppid = -1;
	memset(&event->comm, 0, sizeof(event->comm));

	if (!machine__is_host(machine)) {
		*tgid = machine->pid;
	} else if (perf_event__get_comm_ids(pid, event->comm.comm,
					    sizeof(event->comm.comm),
					    tgid, ppid) != 0) {
		return -1;
	}

	if (*tgid < 0)
		return -1;

	event->comm.header.type = PERF_RECORD_COMM;
	event->comm.pid = *tgid;
	event->comm.tid = pid;

	/* pad the comm string to a u64 boundary and zero the id header */
	aligned_len = PERF_ALIGN(strlen(event->comm.comm) + 1, sizeof(u64));
	memset(event->comm.comm + aligned_len, 0, machine->id_hdr_size);
	event->comm.header.size = sizeof(event->comm) -
				  (sizeof(event->comm.comm) - aligned_len) +
				  machine->id_hdr_size;

	return 0;
}

208
/*
 * Synthesize and deliver a COMM event for @pid.  Returns the thread
 * group id on success, -1 on failure.
 */
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0 ||
	    perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275
/*
 * Stat /proc/<pid>/ns/<ns> and record the (dev, ino) pair that uniquely
 * identifies that namespace.  On stat failure ns_link_info is left
 * untouched (it is pre-zeroed by the caller).
 */
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	/* snprintf: bound the write instead of trusting sprintf */
	snprintf(proc_ns, sizeof(proc_ns), "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

/*
 * Synthesize a PERF_RECORD_NAMESPACES event describing all of @pid's
 * namespaces.  A no-op (returning 0) unless the tool requested
 * namespace events.  Returns -1 when delivery fails.
 */
int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct perf_ns_link_info *link_info;
	size_t sz;
	u32 i;

	if (!tool || !tool->namespace_events)
		return 0;

	sz = sizeof(event->namespaces) +
	     (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	     machine->id_hdr_size;
	memset(&event->namespaces, 0, sz);

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;
	event->namespaces.nr_namespaces = NR_NAMESPACES;

	link_info = event->namespaces.link_info;
	for (i = 0; i < event->namespaces.nr_namespaces; i++)
		perf_event__get_ns_link_info(pid, perf_ns__name(i),
					     &link_info[i]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
	event->namespaces.header.size = sz;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

276
/*
 * Synthesize a PERF_RECORD_FORK event for thread @pid of process @tgid.
 * Returns 0 on success, -1 when delivery fails.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	pid_t parent;

	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * for main thread set parent to ppid from status file. For other
	 * threads set parent pid to main thread. ie., assume main thread
	 * spawns all threads in a process
	 */
	parent = (tgid == pid) ? ppid : tgid;
	event->fork.ppid = parent;
	event->fork.ptid = parent;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.size = sizeof(event->fork) + machine->id_hdr_size;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

308 309 310 311 312
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
313 314
				       bool mmap_data,
				       unsigned int proc_map_timeout)
315 316 317
{
	char filename[PATH_MAX];
	FILE *fp;
318 319
	unsigned long long t;
	bool truncation = false;
320
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
321
	int rc = 0;
322 323
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
324

325 326 327
	if (machine__is_default_guest(machine))
		return 0;

328 329
	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);
330 331 332 333 334 335 336 337 338 339

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

340
	event->header.type = PERF_RECORD_MMAP2;
341
	t = rdclock();
342

343
	while (1) {
344 345 346 347
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
348
		unsigned int ino;
349
		size_t size;
350
		ssize_t n;
351

352 353 354
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

355 356 357 358 359
		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
360 361 362 363
			truncation = true;
			goto out;
		}

364 365 366
		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

367
		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
368
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
369 370 371 372 373
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

374 375 376
		/*
 		 * Anon maps don't have the execname.
 		 */
377
		if (n < 7)
378
			continue;
379 380 381

		event->mmap2.ino = (u64)ino;

382 383 384
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
385 386 387 388
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
389

390 391 392 393 394 395 396 397 398 399 400 401 402 403 404
		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

405 406 407 408 409 410
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}
411

412 413 414 415
out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

416 417
		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);
418

419 420
		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
421 422 423
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}
424 425

		size = strlen(execname) + 1;
426
		memcpy(event->mmap2.filename, execname, size);
427
		size = PERF_ALIGN(size, sizeof(u64));
428 429 430 431 432 433 434
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;
435

436
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
437 438
			rc = -1;
			break;
439
		}
440 441 442

		if (truncation)
			break;
443 444 445
	}

	fclose(fp);
446
	return rc;
447 448
}

449
int perf_event__synthesize_modules(struct perf_tool *tool,
450
				   perf_event__handler_t process,
451
				   struct machine *machine)
452
{
453
	int rc = 0;
454
	struct map *pos;
455
	struct map_groups *kmaps = &machine->kmaps;
456
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
457
	union perf_event *event = zalloc((sizeof(event->mmap) +
458
					  machine->id_hdr_size));
459 460 461 462 463 464 465
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
466

467 468 469 470
	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
471
	if (machine__is_host(machine))
472
		event->header.misc = PERF_RECORD_MISC_KERNEL;
473
	else
474
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
475

476
	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
477 478
		size_t size;

479
		if (__map__is_kernel(pos))
480 481
			continue;

482
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
483 484 485
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
486 487
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
488 489 490 491 492
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
493
		       pos->dso->long_name_len + 1);
494
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
495 496 497
			rc = -1;
			break;
		}
498 499
	}

500
	free(event);
501
	return rc;
502 503
}

504 505
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
506
				      union perf_event *fork_event,
507
				      union perf_event *namespaces_event,
508
				      pid_t pid, int full,
509
				      perf_event__handler_t process,
510
				      struct perf_tool *tool,
511 512 513
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
514
{
515 516
	char filename[PATH_MAX];
	DIR *tasks;
517
	struct dirent *dirent;
518
	pid_t tgid, ppid;
519
	int rc = 0;
520 521 522 523 524 525 526 527 528

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

529 530 531 532 533
		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;


534
		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
535 536
							  process, machine, mmap_data,
							  proc_map_timeout);
537 538 539 540 541 542 543 544 545 546 547 548 549 550
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

551
	while ((dirent = readdir(tasks)) != NULL) {
552 553 554
		char *end;
		pid_t _pid;

555
		_pid = strtol(dirent->d_name, &end, 10);
556 557 558
		if (*end)
			continue;

559
		rc = -1;
560 561
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
562
			break;
563

564
		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
565
						ppid, process, machine) < 0)
566
			break;
567 568 569 570 571

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

572 573 574
		/*
		 * Send the prepared comm event
		 */
575
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
576
			break;
577

578
		rc = 0;
579 580 581
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
582
						process, machine, mmap_data, proc_map_timeout);
583 584
			if (rc)
				break;
585
		}
586 587 588
	}

	closedir(tasks);
589
	return rc;
590 591
}

592
int perf_event__synthesize_thread_map(struct perf_tool *tool,
593
				      struct thread_map *threads,
594
				      perf_event__handler_t process,
595
				      struct machine *machine,
596 597
				      bool mmap_data,
				      unsigned int proc_map_timeout)
598
{
599
	union perf_event *comm_event, *mmap_event, *fork_event;
600
	union perf_event *namespaces_event;
601
	int err = -1, thread, j;
602

603
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
604 605 606
	if (comm_event == NULL)
		goto out;

607
	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
608 609 610
	if (mmap_event == NULL)
		goto out_free_comm;

611 612 613 614
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

615 616 617 618 619 620
	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

621 622 623
	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
624
					       fork_event, namespaces_event,
625
					       thread_map__pid(threads, thread), 0,
626
					       process, tool, machine,
627
					       mmap_data, proc_map_timeout)) {
628 629 630
			err = -1;
			break;
		}
631 632 633 634 635

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
636
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
637 638 639 640
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
641
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
642 643 644 645 646 647 648
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
649
			    __event__synthesize_thread(comm_event, mmap_event,
650
						       fork_event, namespaces_event,
651 652
						       comm_event->comm.pid, 0,
						       process, tool, machine,
653
						       mmap_data, proc_map_timeout)) {
654 655 656 657
				err = -1;
				break;
			}
		}
658
	}
659 660
	free(namespaces_event);
out_free_fork:
661 662
	free(fork_event);
out_free_mmap:
663 664 665 666 667 668 669
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

670
int perf_event__synthesize_threads(struct perf_tool *tool,
671
				   perf_event__handler_t process,
672 673 674
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
675 676
{
	DIR *proc;
677
	char proc_path[PATH_MAX];
678
	struct dirent *dirent;
679
	union perf_event *comm_event, *mmap_event, *fork_event;
680
	union perf_event *namespaces_event;
681 682
	int err = -1;

683 684 685
	if (machine__is_default_guest(machine))
		return 0;

686
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
687 688 689
	if (comm_event == NULL)
		goto out;

690
	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
691 692
	if (mmap_event == NULL)
		goto out_free_comm;
693

694 695 696 697
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

698 699 700 701 702 703
	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

704 705 706
	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

707
	if (proc == NULL)
708
		goto out_free_namespaces;
709

710
	while ((dirent = readdir(proc)) != NULL) {
711
		char *end;
712
		pid_t pid = strtol(dirent->d_name, &end, 10);
713 714 715

		if (*end) /* only interested in proper numerical dirents */
			continue;
716 717 718 719
		/*
 		 * We may race with exiting thread, so don't stop just because
 		 * one thread couldn't be synthesized.
 		 */
720 721 722
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
723
					   proc_map_timeout);
724 725
	}

726
	err = 0;
727
	closedir(proc);
728 729
out_free_namespaces:
	free(namespaces_event);
730 731
out_free_fork:
	free(fork_event);
732 733 734 735 736 737
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
738
}
739

740 741 742 743 744
/* Callback state for kallsyms__parse(): symbol to look for, address found. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};

745
static int find_symbol_cb(void *arg, const char *name, char type,
746
			  u64 start)
747 748 749
{
	struct process_symbol_args *args = arg;

750 751 752 753 754 755
	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
756 757 758 759 760 761
		return 0;

	args->start = start;
	return 1;
}

762 763 764 765 766 767 768 769 770 771 772
/*
 * Look up @symbol_name in @kallsyms_filename.  Returns its address, or
 * 0 when the symbol cannot be found (or the file cannot be parsed).
 */
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	return kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) > 0 ?
	       args.start : 0;
}

773
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
774
				       perf_event__handler_t process,
775
				       struct machine *machine)
776 777
{
	size_t size;
778
	const char *mmap_name;
779
	char name_buff[PATH_MAX];
780
	struct map *map = machine__kernel_map(machine);
781
	struct kmap *kmap;
782
	int err;
783 784
	union perf_event *event;

785 786
	if (symbol_conf.kptr_restrict)
		return -1;
787
	if (map == NULL)
788 789
		return -1;

790 791 792 793 794
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
795
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
796 797 798 799 800
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}
801

802
	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
803
	if (machine__is_host(machine)) {
804 805 806 807
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
808
		event->header.misc = PERF_RECORD_MISC_KERNEL;
809
	} else {
810
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
811
	}
812

813
	kmap = map__kmap(map);
814
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
815
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
816
	size = PERF_ALIGN(size, sizeof(u64));
817 818
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
819
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
820
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
821 822 823 824
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

825
	err = perf_tool__process_synth_event(tool, event, machine, process);
826 827 828
	free(event);

	return err;
829 830
}

831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866
/*
 * Synthesize a PERF_RECORD_THREAD_MAP event carrying the (pid, comm)
 * pairs of @threads.  Returns 0 on success or a negative errno.
 */
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		/*
		 * snprintf rather than strncpy: guarantees NUL termination
		 * even when comm fills the whole field (the zalloc above
		 * already zeroed any padding).
		 */
		snprintf((char *) &entry->comm, sizeof(entry->comm), "%s", comm);
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997
/* Fill a cpu_map_entries payload with the cpu ids from @map. */
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int idx;

	cpus->nr = map->nr;
	for (idx = 0; idx < map->nr; idx++)
		cpus->cpu[idx] = map->map[idx];
}

/*
 * Fill a cpu_map_mask payload: one bit set per cpu in @map, sized to
 * cover cpu ids up to @max.
 */
static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int idx;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (idx = 0; idx < map->nr; idx++)
		set_bit(map->map[idx], mask->mask);
}

/* Bytes needed for a cpu_map_entries payload holding @map->nr cpu ids. */
static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

/*
 * Bytes needed for a cpu_map_mask payload covering @map; also returns
 * the highest cpu bit (cpu id + 1) via @max.
 */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

/*
 * Pick the cheaper encoding (cpu id list vs cpu mask) for @map and
 * allocate a zeroed buffer large enough for it; *size, *type and *max
 * are filled in for a later cpu_map_data__synthesize() call.
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)  (see cpus_size())
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

/*
 * Serialize @map into @data using the representation chosen by
 * cpu_map_data__alloc() (explicit cpu id list or cpu mask).
 */
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;	/* was an implicit fallthrough into default */
	default:
		break;
	}
}

/*
 * Allocate and fill a PERF_RECORD_CPU_MAP event describing @map.
 * Returns NULL on allocation failure; the caller frees the event.
 */
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	struct cpu_map_event *event;
	size_t size = sizeof(*event);
	u16 type;
	int max;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (event == NULL)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

/*
 * Synthesize and deliver a PERF_RECORD_CPU_MAP event for @map.
 * Returns the handler's result, or -ENOMEM on allocation failure.
 */
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event = cpu_map_event__new(map);
	int err;

	if (event == NULL)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);
	free(event);
	return err;
}

998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event carrying the aggregation
 * mode, interval and scaling settings of @config.  Returns the
 * handler's result, or -ENOMEM on allocation failure.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

/* append one (tag, value) term and advance the cursor */
#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
/*
 * Synthesize a PERF_RECORD_STAT event carrying one counter reading
 * (@count) for (@cpu, @thread, @id) and deliver it to @process.
 */
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event = {
		.header = {
			.type = PERF_RECORD_STAT,
			.size = sizeof(event),
			.misc = 0,
		},
		.id     = id,
		.cpu    = cpu,
		.thread = thread,
		.val    = count->val,
		.ena    = count->ena,
		.run    = count->run,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074
/*
 * Synthesize a PERF_RECORD_STAT_ROUND event marking an interval (or
 * final) round at @evtime and deliver it to @process.
 */
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event = {
		.header = {
			.type = PERF_RECORD_STAT_ROUND,
			.size = sizeof(event),
			.misc = 0,
		},
		.time = evtime,
		.type = type,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098
/*
 * Decode a PERF_RECORD_STAT_CONFIG event back into @config, matching
 * the terms written by perf_event__synthesize_stat_config().  Unknown
 * terms are reported via pr_warning and skipped.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

1099 1100
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
1101 1102 1103 1104 1105 1106 1107
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

1108
	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1109 1110
}

1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

/* Default tool callback for PERF_RECORD_COMM: forward to the machine layer. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

/* Default tool callback for PERF_RECORD_NAMESPACES: forward to the machine layer. */
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

/* Default tool callback for PERF_RECORD_LOST: forward to the machine layer. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

/* Default tool callback for PERF_RECORD_AUX: forward to the machine layer. */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

/* Default tool callback for PERF_RECORD_ITRACE_START: forward to the machine layer. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

/* Default tool callback for PERF_RECORD_LOST_SAMPLES: forward to the machine layer. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

/* Default tool callback for PERF_RECORD_SWITCH[_CPU_WIDE]: forward to the machine layer. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

1194 1195
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
1196
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1197
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1198 1199 1200
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
1201 1202
}

1203 1204 1205
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1206
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1207 1208 1209 1210
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
1211 1212 1213 1214
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1215 1216 1217
		       event->mmap2.filename);
}

1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

1234 1235 1236 1237 1238
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

1239
	ret = fprintf(fp, ": ");
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

/* Default tool callback for PERF_RECORD_MMAP: forward to the machine layer. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

/* Default tool callback for PERF_RECORD_MMAP2: forward to the machine layer. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

/* Pretty-print a PERF_RECORD_FORK/EXIT event: "(pid:tid):(ppid:ptid)". */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

/* Default tool callback for PERF_RECORD_FORK: forward to the machine layer. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

/* Default tool callback for PERF_RECORD_EXIT: forward to the machine layer. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

1289 1290 1291 1292 1293 1294 1295 1296 1297
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

/* Pretty-print a PERF_RECORD_ITRACE_START event: the traced pid/tid. */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

/*
 * Generic pretty-printer: print "PERF_RECORD_<name>" followed by a
 * per-type detail line for the event types that have one; other types
 * just get a newline.  Returns the number of characters printed.
 */
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

/* Catch-all tool callback: let the machine layer dispatch by event type. */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

/*
 * Resolve @addr to a map within @thread's map groups and fill in @al.
 *
 * The cpumode selects which map group is searched and sets al->level:
 * 'k' host kernel, '.' host user, 'g' guest kernel, 'u' guest user,
 * 'H' hypervisor (or a cpumode we are not collecting - then the sample
 * is marked filtered and no map lookup is done).  On return al->map may
 * be NULL if nothing matched; otherwise al->addr has been translated to
 * a map-relative address.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		/* Guest samples while not profiling guests: filter out. */
		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		/* Host samples while not profiling the host: filter out. */
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

1441
void thread__find_addr_location(struct thread *thread,
1442
				u8 cpumode, enum map_type type, u64 addr,
1443
				struct addr_location *al)
1444
{
1445
	thread__find_addr_map(thread, cpumode, type, addr, al);
1446
	if (al->map != NULL)
1447
		al->sym = map__find_symbol(al->map, al->addr);
1448 1449
	else
		al->sym = NULL;
1450 1451
}

/*
 * Resolve a sample to thread/map/symbol and apply the configured
 * thread/dso/symbol filters, recording the verdicts in al->filtered.
 *
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread(), by pairing this with addr_location__put().
 * Returns 0 on success, -1 if the thread could not be found or created.
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	/* Map the sampled cpu to its socket id, if the env topology is known. */
	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		/* --dso filter: match either the short or the long dso name. */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	/* --symbols filter: keep only listed symbol names. */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * Resolvers such as machine__resolve() return with a reference count held
 * on al->thread.  When done using the addr_location (taking extra refs
 * first if a pointer to one of its entries must be kept), callers pair it
 * with addr_location__put() so that refcount is dropped.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

/*
 * Is this attr an Intel BTS (Branch Trace Store) event?  BTS is the
 * hardware branch-instructions counter sampling on every branch, i.e.
 * config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS with sample_period == 1.
 *
 * Note: config must be compared with '==', not '&' - the PERF_COUNT_HW_*
 * values are enumerators, not bit flags, so the old bitwise test also
 * matched unrelated configs sharing a bit with BRANCH_INSTRUCTIONS
 * (e.g. PERF_COUNT_HW_BRANCH_MISSES).
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

/*
 * Does sample->addr of this event point at something symbol-resolvable?
 * True for software page-fault events (addr is the faulting address) and
 * for BTS events (addr is the branch target).
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	bool page_fault = attr->type == PERF_TYPE_SOFTWARE &&
			  (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
			   attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
			   attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ);

	return page_fault || is_bts_event(attr);
}

1552 1553
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
1554
{
1555
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1556
	if (!al->map)
1557
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1558 1559 1560 1561 1562 1563
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
1564
		al->sym = map__find_symbol(al->map, al->addr);
1565
}