event.c 39.6 KB
Newer Older
1
#include <errno.h>
2
#include <inttypes.h>
3
#include <linux/kernel.h>
4
#include <linux/types.h>
5
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
6
#include <api/fs/fs.h>
7 8
#include "event.h"
#include "debug.h"
9
#include "hist.h"
10
#include "machine.h"
11
#include "sort.h"
12
#include "string2.h"
13
#include "strlist.h"
14
#include "thread.h"
15
#include "thread_map.h"
16
#include "sane_ctype.h"
17
#include "symbol/kallsyms.h"
18 19
#include "asm/bug.h"
#include "stat.h"
20

21
/*
 * Printable names for perf_event record types, indexed by PERF_RECORD_* id.
 * Index 0 is the synthetic "TOTAL" row used by event-count summaries.
 * Ids with no entry are NULL and rendered as "UNKNOWN" by perf_event__name().
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
};

57 58 59 60 61 62 63 64 65 66
/*
 * Short names of the namespace kinds, indexed by the *_NS_INDEX values.
 * Used both to build /proc/<pid>/ns/<name> paths and for pretty-printing.
 */
static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};

67
const char *perf_event__name(unsigned int id)
68
{
69
	if (id >= ARRAY_SIZE(perf_event__names))
70
		return "INVALID";
71
	if (!perf_event__names[id])
72
		return "UNKNOWN";
73
	return perf_event__names[id];
74 75
}

76 77 78 79 80 81 82
/* Return the printable namespace name for a *_NS_INDEX id. */
static const char *perf_ns__name(unsigned int id)
{
	return (id < ARRAY_SIZE(perf_ns__names)) ? perf_ns__names[id]
						 : "UNKNOWN";
}

83 84 85 86 87 88
/*
 * Deliver a synthesized event to 'process' with a dummy sample: all ids
 * are -1, the period is 1 and cpumode comes from the event header.
 *
 * Fix: dropped the stray ';' that followed the function's closing brace
 * (an empty file-scope declaration, invalid in strict C).
 */
static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

101 102
/*
 * Assumes that the first 4095 bytes of /proc/pid/stat contains
103
 * the comm, tgid and ppid.
104
 */
105 106
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
107 108
{
	char filename[PATH_MAX];
109 110
	char bf[4096];
	int fd;
111 112
	size_t size = 0;
	ssize_t n;
113
	char *name, *tgids, *ppids;
114 115 116

	*tgid = -1;
	*ppid = -1;
117 118 119

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

120 121
	fd = open(filename, O_RDONLY);
	if (fd < 0) {
122
		pr_debug("couldn't open %s\n", filename);
123
		return -1;
124 125
	}

126 127 128
	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
129
		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
130 131
			   pid);
		return -1;
132
	}
133
	bf[n] = '\0';
134

135 136
	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
137
	ppids = strstr(bf, "PPid:");
138 139 140

	if (name) {
		name += 5;  /* strlen("Name:") */
141
		name = rtrim(ltrim(name));
142 143 144 145 146 147 148 149 150 151 152
		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
153
		*tgid = atoi(tgids);
154 155 156
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}
157

158 159 160 161 162 163 164 165
	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
166 167
}

168 169 170
/*
 * Fill in a PERF_RECORD_COMM event for @pid and report the task's thread
 * group id and parent pid through @tgid/@ppid.  Returns 0 on success,
 * -1 when the ids cannot be determined (e.g. the task already exited).
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		/* guest machine: no host /proc entry to parse, use its pid */
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * Shrink the event to the u64-aligned comm length, then zero the
	 * id-header tail that the session layer may append per event.
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

205
/*
 * Build and deliver a PERF_RECORD_COMM event for @pid.
 * Returns the task's thread group id on success, -1 on failure.
 */
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid, ppid;

	/* prepare first, then deliver; either step failing aborts */
	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0 ||
	    perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272
/*
 * Record the device/inode pair of /proc/<pid>/ns/<ns> into @ns_link_info.
 * Fields are left untouched when the link cannot be stat'ed.
 *
 * Fix: use snprintf instead of sprintf so the path can never overflow
 * the on-stack buffer.
 */
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	snprintf(proc_ns, sizeof(proc_ns), "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

/*
 * Synthesize a PERF_RECORD_NAMESPACES event for @pid/@tgid describing the
 * task's namespace links read from /proc/<pid>/ns/*.  A no-op returning 0
 * unless the tool requested namespace events.  Returns -1 when delivery
 * fails.
 */
int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	/* clear header, fixed part, link_info payload and id-header tail */
	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

273
/*
 * Synthesize and deliver a PERF_RECORD_FORK event for @pid.
 * Returns 0 on success, -1 if the handler rejects the event.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	/*
	 * The main thread is parented to ppid from the status file; every
	 * other thread is parented to the main thread, i.e. we assume the
	 * main thread spawns all threads in a process.
	 */
	pid_t parent = (tgid == pid) ? ppid : tgid;

	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	event->fork.ppid = parent;
	event->fork.ptid = parent;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

305 306 307 308 309
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
310 311
				       bool mmap_data,
				       unsigned int proc_map_timeout)
312 313 314
{
	char filename[PATH_MAX];
	FILE *fp;
315 316
	unsigned long long t;
	bool truncation = false;
317
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
318
	int rc = 0;
319 320
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
321

322 323 324
	if (machine__is_default_guest(machine))
		return 0;

325 326
	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);
327 328 329 330 331 332 333 334 335 336

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

337
	event->header.type = PERF_RECORD_MMAP2;
338
	t = rdclock();
339

340
	while (1) {
341 342 343 344
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
345
		unsigned int ino;
346
		size_t size;
347
		ssize_t n;
348

349 350 351
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

352 353 354 355 356
		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
357 358 359 360
			truncation = true;
			goto out;
		}

361 362 363
		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

364
		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
365
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
366 367 368 369 370
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

371 372 373
		/*
 		 * Anon maps don't have the execname.
 		 */
374
		if (n < 7)
375
			continue;
376 377 378

		event->mmap2.ino = (u64)ino;

379 380 381
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
382 383 384 385
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
386

387 388 389 390 391 392 393 394 395 396 397 398 399 400 401
		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

402 403 404 405 406 407
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}
408

409 410 411 412
out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

413 414
		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);
415

416 417
		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
418 419 420
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}
421 422

		size = strlen(execname) + 1;
423
		memcpy(event->mmap2.filename, execname, size);
424
		size = PERF_ALIGN(size, sizeof(u64));
425 426 427 428 429 430 431
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;
432

433
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
434 435
			rc = -1;
			break;
436
		}
437 438 439

		if (truncation)
			break;
440 441 442
	}

	fclose(fp);
443
	return rc;
444 445
}

446
/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module map so module
 * addresses can later be resolved to symbols.  The kernel map itself is
 * skipped (see perf_event__synthesize_kernel_mmap).  Returns 0 on
 * success, -1 on allocation or delivery failure.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		/* the kernel map proper is synthesized elsewhere */
		if (__map__is_kernel(pos))
			continue;

		/* filename field is trimmed to the u64-aligned name length */
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

501 502
/*
 * Synthesize the events describing one task.  With !full only COMM,
 * NAMESPACES and MMAP events for @pid itself are emitted; with full,
 * every thread under /proc/<pid>/task additionally gets FORK/COMM
 * events, and the main thread's maps are emitted once.  The caller
 * provides preallocated event buffers (sized for the machine's
 * id_hdr_size).  Returns 0 on success, -1 on failure.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;


		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		/* skip ".", ".." and anything non-numeric */
		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

589
/*
 * Synthesize COMM/NAMESPACES/MMAP events for every thread in @threads.
 * When a thread's group leader is not in the map, events for the leader
 * are synthesized too so the thread tree can be reconstructed.  Returns
 * 0 on success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	/* preallocate one buffer per event kind, reused for all threads */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

667
/*
 * Walk /proc and synthesize COMM/FORK/NAMESPACES/MMAP events for every
 * task on the system.  Per-task failures are deliberately ignored since
 * we can race with tasks exiting.  Returns 0 on success, -1 only when
 * setup (allocation or opening /proc) fails.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	/* preallocate one buffer per event kind, reused for all tasks */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_namespaces;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_namespaces:
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
736

737 738 739 740 741
/* Callback state for the kallsyms symbol lookup in find_symbol_cb(). */
struct process_symbol_args {
	const char *name;	/* symbol to look for */
	u64	   start;	/* filled with the symbol's start address */
};

742
static int find_symbol_cb(void *arg, const char *name, char type,
743
			  u64 start)
744 745 746
{
	struct process_symbol_args *args = arg;

747 748 749 750 751 752
	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
753 754 755 756 757 758
		return 0;

	args->start = start;
	return 1;
}

759 760 761 762 763 764 765 766 767 768 769
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

770
/*
 * Synthesize the PERF_RECORD_MMAP event that covers the kernel text map,
 * using the ref_reloc_sym's address as pgoff so the map can be relocated
 * against a different kernel later.  Fails (-1) under kptr_restrict or
 * when there is no kernel map.  Returns the handler's result otherwise.
 */
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	/*
	 * NOTE(review): kmap and kmap->ref_reloc_sym are dereferenced with no
	 * NULL check — presumably callers guarantee the kernel map was set up
	 * with a ref_reloc_sym; confirm, otherwise this can crash.
	 */
	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863
/*
 * Synthesize a PERF_RECORD_THREAD_MAP event carrying the pid/comm pairs
 * of @threads.  Returns the handler's result, or -ENOMEM on allocation
 * failure.
 *
 * Fix: strncpy() does not NUL-terminate when the comm exactly fills the
 * entry buffer; force termination so readers never run off the field.
 */
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
		/* strncpy() leaves no NUL when comm fills the buffer */
		entry->comm[sizeof(entry->comm) - 1] = '\0';
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994
/* Encode @map as an explicit cpu-id list into @cpus. */
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int idx;

	cpus->nr = map->nr;
	for (idx = 0; idx < map->nr; idx++)
		cpus->cpu[idx] = map->map[idx];
}

/* Encode @map as a bitmask into @mask, sized for the highest cpu bit @max. */
static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int idx;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (idx = 0; idx < map->nr; idx++)
		set_bit(map->map[idx], mask->mask);
}

/* Bytes needed to encode @map as an explicit cpu-id list. */
static size_t cpus_size(struct cpu_map *map)
{
	size_t payload = map->nr * sizeof(u16);

	return sizeof(struct cpu_map_entries) + payload;
}

/*
 * Bytes needed to encode @map as a bitmask; also reports through @max
 * the number of bits the mask must hold.
 */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

/*
 * Allocate a zeroed buffer big enough for the more compact encoding of
 * @map (cpu list vs bitmask), adding the payload size to *size and
 * reporting the chosen encoding via *type and the mask width via *max.
 * Returns NULL on allocation failure; the caller owns the buffer.
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

/*
 * Fill @data with the @type encoding of @map (@max is the mask bit width
 * for the bitmask encoding).
 *
 * Fixes: the PERF_CPU_MAP__MASK case was missing its 'break' (implicit
 * fallthrough into default — harmless today but fragile), and a stray
 * ';' followed the switch statement.
 */
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

/*
 * Allocate and fill a PERF_RECORD_CPU_MAP event for @map using the more
 * compact encoding.  Returns NULL on allocation failure; the caller owns
 * (and must free) the event.
 */
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	/* cpu_map_data__alloc() adds the payload size to 'size' */
	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

/*
 * Synthesize and deliver a PERF_RECORD_CPU_MAP event describing @map.
 * Returns the handler's result, or -ENOMEM on allocation failure.
 */
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int err;
	struct cpu_map_event *event = cpu_map_event__new(map);

	if (event == NULL)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);
	free(event);

	return err;
}

995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event carrying every
 * PERF_STAT_CONFIG_TERM__* tag/value pair from @config.  Returns the
 * handler's result, or -ENOMEM on allocation failure.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

/* append one tag/value entry per config term */
#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
/*
 * Synthesize a PERF_RECORD_STAT event carrying one counter reading for
 * (@cpu, @thread, @id) and hand it to @process.
 */
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event = {
		.header = {
			.type = PERF_RECORD_STAT,
			.size = sizeof(event),
			.misc = 0,
		},
		.id     = id,
		.cpu    = cpu,
		.thread = thread,
		.val    = count->val,
		.ena    = count->ena,
		.run    = count->run,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071
/*
 * Synthesize a PERF_RECORD_STAT_ROUND event marking an interval/final
 * round (@type) taken at time @evtime, and hand it to @process.
 */
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event = {
		.header = {
			.type = PERF_RECORD_STAT_ROUND,
			.size = sizeof(event),
			.misc = 0,
		},
		.time = evtime,
		.type = type,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095
/*
 * Apply the tag/value pairs of a PERF_RECORD_STAT_CONFIG event to
 * @config.  Unknown tags are reported and skipped, keeping the format
 * forward-compatible.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
/* map one wire tag onto the matching perf_stat_config field */
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

1096 1097
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
1098 1099 1100 1101 1102 1103 1104
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

1105
	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1106 1107
}

1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

/*
 * Default perf_tool callback for PERF_RECORD_COMM: forward to
 * machine__process_comm_event().  @tool is unused.
 */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

/*
 * Default perf_tool callback for PERF_RECORD_NAMESPACES: forward to
 * machine__process_namespaces_event().  @tool is unused.
 */
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

/*
 * Default perf_tool callback for PERF_RECORD_LOST: forward to
 * machine__process_lost_event().  @tool is unused.
 */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

/*
 * Default perf_tool callback for PERF_RECORD_AUX: forward to
 * machine__process_aux_event().  @tool and @sample are unused.
 */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

/*
 * Default perf_tool callback for PERF_RECORD_ITRACE_START: forward to
 * machine__process_itrace_start_event().  @tool and @sample are unused.
 */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

/*
 * Default perf_tool callback for PERF_RECORD_LOST_SAMPLES: forward to
 * machine__process_lost_samples_event().  @tool is unused.
 */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

/*
 * Default perf_tool callback for PERF_RECORD_SWITCH{,_CPU_WIDE}: forward
 * to machine__process_switch_event().  @tool and @sample are unused.
 */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

1191 1192
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
1193
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1194
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1195 1196 1197
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
1198 1199
}

1200 1201 1202
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1203
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1204 1205 1206 1207
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
1208 1209 1210 1211
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1212 1213 1214
		       event->mmap2.filename);
}

1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

1231 1232 1233 1234 1235
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

1236
	ret = fprintf(fp, ": ");
1237 1238 1239 1240 1241 1242 1243 1244 1245 1246

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

/*
 * Default perf_tool callback for PERF_RECORD_MMAP: forward to
 * machine__process_mmap_event().  @tool is unused.
 */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

/*
 * Default perf_tool callback for PERF_RECORD_MMAP2: forward to
 * machine__process_mmap2_event().  @tool is unused.
 */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

1263 1264 1265 1266 1267 1268 1269
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

/*
 * Default perf_tool callback for PERF_RECORD_FORK: forward to
 * machine__process_fork_event().  @tool is unused.
 */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

/*
 * Default perf_tool callback for PERF_RECORD_EXIT: forward to
 * machine__process_exit_event().  @tool is unused.
 */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

1286 1287
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
1288
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1289 1290 1291
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1292 1293
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
1294 1295
}

1296 1297 1298 1299 1300 1301
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

/*
 * Print a one-line human readable rendering of @event to @fp: the
 * "PERF_RECORD_<name>" prefix followed by per-type detail, if a detail
 * printer exists for this record type.  Returns bytes printed.
 */
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		/* Record types without a detail printer just get a newline. */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

/*
 * Generic perf_tool callback: forward any event to
 * machine__process_event(), which dispatches on the record type.
 * @tool is unused.
 */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

/*
 * Resolve @addr to a map in @thread's address space and fill in @al
 * (machine, thread, addr, cpumode, level, map, filtered).  The map group
 * searched depends on @cpumode: kernel/guest-kernel addresses use the
 * machine's kmaps, user addresses the thread's own maps.  On success
 * al->addr is rewritten to the map-relative address; al->map is NULL if
 * nothing matched.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/*
		 * Unresolvable cpumode ('H'): record why via the filtered
		 * bits -- guest samples without guest support, host samples
		 * without host support -- and bail out with no map.
		 */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

1439
void thread__find_addr_location(struct thread *thread,
1440
				u8 cpumode, enum map_type type, u64 addr,
1441
				struct addr_location *al)
1442
{
1443
	thread__find_addr_map(thread, cpumode, type, addr, al);
1444
	if (al->map != NULL)
1445
		al->sym = map__find_symbol(al->map, al->addr);
1446 1447
	else
		al->sym = NULL;
1448 1449
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
/*
 * Resolve @sample->ip for @sample->pid/tid on @machine, filling in @al:
 * thread, map, symbol, cpu, socket and the HIST_FILTER__* bits for
 * thread/dso/symbol filters.  Returns 0 on success, -1 if the thread
 * cannot be found or created.
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	/* Map the sample's cpu to its socket via the perf_env topology, if present. */
	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		/*
		 * A --dso filter matches either the short or the long name;
		 * anything else (including a mapless sample) is filtered out.
		 */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	/* Apply the --symbols filter against the resolved symbol name. */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The preprocess_sample method will return with reference counts for the
 * objects in it; when done using (and perhaps getting ref counts if
 * needing to keep a pointer to one of those entries) it must be paired
 * with addr_location__put(), so that the refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

/*
 * Does sample->addr of this event type refer to a code/data address that
 * can be correlated with a symbol?  True for software page-fault events
 * and for BTS branch events.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE) {
		switch (attr->config) {
		case PERF_COUNT_SW_PAGE_FAULTS:
		case PERF_COUNT_SW_PAGE_FAULTS_MIN:
		case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
			return true;
		default:
			break;
		}
	}

	return is_bts_event(attr);
}

/*
 * Resolve @sample->addr (the secondary address of a sample, as opposed
 * to its ip) to a map and symbol in @thread's address space, trying the
 * function maps first and falling back to variable maps.
 */
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}