event.c 39.6 KB
Newer Older
1
#include <inttypes.h>
2
#include <linux/kernel.h>
3
#include <linux/types.h>
4
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
5
#include <api/fs/fs.h>
6 7
#include "event.h"
#include "debug.h"
8
#include "hist.h"
9
#include "machine.h"
10
#include "sort.h"
11
#include "string.h"
12
#include "strlist.h"
13
#include "thread.h"
14
#include "thread_map.h"
15
#include "symbol/kallsyms.h"
16 17
#include "asm/bug.h"
#include "stat.h"
18

19
/*
 * Human-readable names for PERF_RECORD_* event types, indexed by the
 * type value itself.  Gaps in the numbering stay NULL (the array is
 * static, so unmentioned slots are zero-initialized); perf_event__name()
 * reports those as "UNKNOWN".
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
};

55 56 57 58 59 60 61 62 63 64
/*
 * Short names for the namespace slots of a PERF_RECORD_NAMESPACES event,
 * indexed by the *_NS_INDEX constants.  The index order must match the
 * order link_info entries are filled in perf_event__synthesize_namespaces().
 */
static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};

65
const char *perf_event__name(unsigned int id)
66
{
67
	if (id >= ARRAY_SIZE(perf_event__names))
68
		return "INVALID";
69
	if (!perf_event__names[id])
70
		return "UNKNOWN";
71
	return perf_event__names[id];
72 73
}

74 75 76 77 78 79 80
/* Printable name for a namespace slot index, "UNKNOWN" when out of range. */
static const char *perf_ns__name(unsigned int id)
{
	return (id < ARRAY_SIZE(perf_ns__names)) ? perf_ns__names[id]
						 : "UNKNOWN";
}

81 82 83 84 85 86
/*
 * Deliver a synthesized event to @process with a dummy sample: synthesized
 * events have no real sample data, so everything but the period and cpumode
 * is set to -1 ("not available").  The cpumode is propagated from the event
 * header so consumers can still tell user from kernel/guest context.
 *
 * Returns whatever @process returns (0 on success by convention).
 */
static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

99 100
/*
 * Assumes that the first 4095 bytes of /proc/pid/stat contains
101
 * the comm, tgid and ppid.
102
 */
103 104
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
105 106
{
	char filename[PATH_MAX];
107 108
	char bf[4096];
	int fd;
109 110
	size_t size = 0;
	ssize_t n;
111
	char *name, *tgids, *ppids;
112 113 114

	*tgid = -1;
	*ppid = -1;
115 116 117

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

118 119
	fd = open(filename, O_RDONLY);
	if (fd < 0) {
120
		pr_debug("couldn't open %s\n", filename);
121
		return -1;
122 123
	}

124 125 126
	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
127
		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
128 129
			   pid);
		return -1;
130
	}
131
	bf[n] = '\0';
132

133 134
	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
135
	ppids = strstr(bf, "PPid:");
136 137 138

	if (name) {
		name += 5;  /* strlen("Name:") */
139
		name = rtrim(ltrim(name));
140 141 142 143 144 145 146 147 148 149 150
		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
151
		*tgid = atoi(tgids);
152 153 154
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}
155

156 157 158 159 160 161 162 163
	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
164 165
}

166 167 168
/*
 * Fill @event with a PERF_RECORD_COMM for @pid, without emitting it.
 * For host machines the comm/tgid/ppid are read from /proc; for guests
 * only the machine pid is known.  Returns 0 on success, -1 on failure
 * (comm unavailable or no valid tgid).
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		/* Guest: no /proc view, attribute the comm to the guest pid. */
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * The on-disk record is trimmed to the u64-aligned comm length;
	 * zero the id-header area that follows the (shortened) comm field.
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

203
/*
 * Synthesize and deliver a PERF_RECORD_COMM for @pid.
 * Returns the thread group id on success, -1 on failure.
 */
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0 ||
	    perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270
/*
 * Read the device/inode pair identifying @pid's @ns namespace from
 * /proc/<pid>/ns/<ns>.  On stat failure the fields are left untouched
 * (callers pre-zero the whole event).
 */
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	/* snprintf: bound the write; a truncated path simply fails the stat */
	snprintf(proc_ns, sizeof(proc_ns), "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

/*
 * Synthesize and deliver a PERF_RECORD_NAMESPACES event for @pid/@tgid,
 * containing one link_info entry per namespace in perf_ns__names order.
 * A no-op (returns 0) unless the tool opted into namespace events.
 * Returns 0 on success, -1 if delivery failed.
 */
int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	/* Caller must have allocated the variable-length tail; zero it all. */
	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

271
/*
 * Synthesize and deliver a PERF_RECORD_FORK for thread @pid of process
 * @tgid.  Returns 0 on success, -1 on delivery failure.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	pid_t parent;

	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * for main thread set parent to ppid from status file. For other
	 * threads set parent pid to main thread. ie., assume main thread
	 * spawns all threads in a process
	*/
	parent = (tgid == pid) ? ppid : tgid;
	event->fork.ppid = parent;
	event->fork.ptid = parent;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

303 304 305 306 307
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
308 309
				       bool mmap_data,
				       unsigned int proc_map_timeout)
310 311 312
{
	char filename[PATH_MAX];
	FILE *fp;
313 314
	unsigned long long t;
	bool truncation = false;
315
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
316
	int rc = 0;
317 318
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
319

320 321 322
	if (machine__is_default_guest(machine))
		return 0;

323 324
	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);
325 326 327 328 329 330 331 332 333 334

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

335
	event->header.type = PERF_RECORD_MMAP2;
336
	t = rdclock();
337

338
	while (1) {
339 340 341 342
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
343
		unsigned int ino;
344
		size_t size;
345
		ssize_t n;
346

347 348 349
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

350 351 352 353 354
		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
355 356 357 358
			truncation = true;
			goto out;
		}

359 360 361
		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

362
		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
363
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
364 365 366 367 368
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

369 370 371
		/*
 		 * Anon maps don't have the execname.
 		 */
372
		if (n < 7)
373
			continue;
374 375 376

		event->mmap2.ino = (u64)ino;

377 378 379
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
380 381 382 383
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
384

385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

400 401 402 403 404 405
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}
406

407 408 409 410
out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

411 412
		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);
413

414 415
		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
416 417 418
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}
419 420

		size = strlen(execname) + 1;
421
		memcpy(event->mmap2.filename, execname, size);
422
		size = PERF_ALIGN(size, sizeof(u64));
423 424 425 426 427 428 429
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;
430

431
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
432 433
			rc = -1;
			break;
434
		}
435 436 437

		if (truncation)
			break;
438 439 440
	}

	fclose(fp);
441
	return rc;
442 443
}

444
/*
 * Synthesize one PERF_RECORD_MMAP per kernel module map in @machine's
 * function map group (the kernel map itself is skipped).  Returns 0 on
 * success, -1 on allocation or delivery failure.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	/* one reusable event buffer, sized for the largest record emitted */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		/* record size is trimmed to the u64-aligned filename length */
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

499 500
/*
 * Synthesize the full event stream describing one task.
 *
 * With @full == 0, emit only a COMM + NAMESPACES + MMAP2 set for @pid.
 * With @full != 0, walk /proc/<pid>/task and emit FORK + NAMESPACES +
 * COMM for every thread, plus the maps of the thread group leader.
 * The four pre-allocated event buffers are reused for every record.
 * Returns 0 on success, -1 on failure (but an unopenable task dir is
 * treated as a benign race with exit and returns 0).
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;


		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		/* skip non-numeric entries ("." , "..") */
		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

587
/*
 * Synthesize COMM/MMAP2/NAMESPACES events for every thread in @threads.
 * If a thread's group leader is not itself in the map, events for the
 * leader are synthesized as well, so consumers always see the leader.
 * Returns 0 on success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	/* pre-allocate one reusable buffer per record type */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

665
/*
 * Synthesize events for every task currently visible under
 * <root_dir>/proc.  Per-thread synthesis failures are deliberately
 * ignored (racing with thread exit is normal); only setup failures
 * return -1.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	/* pre-allocate one reusable buffer per record type */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_namespaces;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_namespaces:
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
734

735 736 737 738 739
/* Lookup context for kallsyms__get_function_start(). */
struct process_symbol_args {
	const char *name;	/* symbol to find */
	u64	   start;	/* filled with the symbol's address */
};

/*
 * kallsyms__parse() callback: record @start when @name matches the
 * searched symbol; returning 1 stops the parse.
 */
static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

757 758 759 760 761 762 763 764 765 766 767
/*
 * Resolve @symbol_name's start address from a kallsyms file.
 * Returns 0 when the file can't be parsed or the symbol isn't found.
 */
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };
	int found = kallsyms__parse(kallsyms_filename, &args, find_symbol_cb);

	return (found > 0) ? args.start : 0;
}

768
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
769
				       perf_event__handler_t process,
770
				       struct machine *machine)
771 772
{
	size_t size;
773
	const char *mmap_name;
774
	char name_buff[PATH_MAX];
775
	struct map *map = machine__kernel_map(machine);
776
	struct kmap *kmap;
777
	int err;
778 779
	union perf_event *event;

780 781
	if (symbol_conf.kptr_restrict)
		return -1;
782
	if (map == NULL)
783 784
		return -1;

785 786 787 788 789
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
790
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
791 792 793 794 795
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}
796

797
	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
798
	if (machine__is_host(machine)) {
799 800 801 802
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
803
		event->header.misc = PERF_RECORD_MISC_KERNEL;
804
	} else {
805
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
806
	}
807

808
	kmap = map__kmap(map);
809
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
810
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
811
	size = PERF_ALIGN(size, sizeof(u64));
812 813
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
814
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
815
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
816 817 818 819
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

820
	err = perf_tool__process_synth_event(tool, event, machine, process);
821 822 823
	free(event);

	return err;
824 825
}

826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861
/*
 * Synthesize a PERF_RECORD_THREAD_MAP carrying the pid and comm of every
 * entry in @threads.  Returns the result of @process, or -ENOMEM.
 */
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		/*
		 * Copy at most size-1 bytes: the event is zalloc'ed, so the
		 * last byte stays 0 and the comm is always NUL-terminated,
		 * even for maximum-length names.
		 */
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm) - 1);
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992
/* Copy @map's cpu numbers into the array form of a cpu_map_data payload. */
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int idx;

	cpus->nr = map->nr;

	for (idx = 0; idx < map->nr; idx++)
		cpus->cpu[idx] = map->map[idx];
}

/* Render @map as a bitmask sized for the highest cpu bit @max. */
static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int idx;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (idx = 0; idx < map->nr; idx++)
		set_bit(map->map[idx], mask->mask);
}

/* Payload size of @map in array (cpu_map_entries) form. */
static size_t cpus_size(struct cpu_map *map)
{
	size_t per_cpu = sizeof(u16);

	return sizeof(struct cpu_map_entries) + map->nr * per_cpu;
}

/*
 * Payload size of @map in bitmask (cpu_map_mask) form; also reports via
 * @max the number of bits the mask must hold.
 */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

/*
 * Allocate a zeroed buffer big enough for @map encoded in whichever of
 * the two cpu_map_data representations is smaller; *size is incremented
 * by the chosen payload size, *type records the representation and *max
 * the highest cpu bit (for the mask form).  Returns NULL on OOM.
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

/*
 * Encode @map into @data using the representation chosen by
 * cpu_map_data__alloc() (@type/@max must come from that call).
 */
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;	/* was a silent fallthrough into default */
	default:
		break;
	}
}

/*
 * Build a heap-allocated PERF_RECORD_CPU_MAP event describing @map.
 * Returns NULL on allocation failure; caller frees.
 */
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	struct cpu_map_event *event;
	size_t size = sizeof(struct cpu_map_event);
	u16 type;
	int max;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (event == NULL)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);

	return event;
}

/*
 * Synthesize and deliver a PERF_RECORD_CPU_MAP for @map.
 * Returns the result of @process, or -ENOMEM.
 */
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int err;
	struct cpu_map_event *event = cpu_map_event__new(map);

	if (event == NULL)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);
	free(event);

	return err;
}

993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG carrying one tag/value pair per
 * PERF_STAT_CONFIG_TERM__* entry.  Returns the result of @process, or
 * -ENOMEM.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

/* Append one tag/value pair; i tracks how many terms were emitted. */
#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052
/*
 * Synthesize a PERF_RECORD_STAT with one counter reading for the given
 * cpu/thread/event id.  Returns the result of @process.
 */
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event = {
		.header = {
			.type = PERF_RECORD_STAT,
			.size = sizeof(event),
			.misc = 0,
		},
		.id     = id,
		.cpu    = cpu,
		.thread = thread,
		.val    = count->val,
		.ena    = count->ena,
		.run    = count->run,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069
/*
 * Synthesize a PERF_RECORD_STAT_ROUND marking an interval/final round at
 * @evtime.  Returns the result of @process.
 */
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event = {
		.header = {
			.type = PERF_RECORD_STAT_ROUND,
			.size = sizeof(event),
			.misc = 0,
		},
		.time = evtime,
		.type = type,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093
/*
 * Apply the tag/value pairs of a PERF_RECORD_STAT_CONFIG back onto
 * @config; unknown tags are reported but otherwise ignored, for
 * forward compatibility with newer producers.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
/* One case per known term: copy the value into the matching field. */
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

1094 1095
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
1096 1097 1098 1099 1100 1101 1102
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

1103
	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1104 1105
}

1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

/* Default COMM handler: resolve the event against @machine's thread state. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

/* Default NAMESPACES handler: delegate to the machine layer. */
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

/* Default LOST handler: account lost records against @machine. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

/* Default AUX handler; the sample is unused, only the event matters. */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

/* Default ITRACE_START handler: delegate to the machine layer. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

/* Default LOST_SAMPLES handler: account lost samples against @machine. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

/* Default SWITCH/SWITCH_CPU_WIDE handler: delegate to the machine layer. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

1189 1190
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
1191
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1192
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1193 1194 1195
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
1196 1197
}

1198 1199 1200
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1201
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1202 1203 1204 1205
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
1206 1207 1208 1209
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1210 1211 1212
		       event->mmap2.filename);
}

1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

1229 1230 1231 1232 1233
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

1234
	ret = fprintf(fp, ": ");
1235 1236 1237 1238 1239 1240 1241 1242 1243 1244

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

/* Default MMAP handler: update @machine's map groups from the event. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

/* Default MMAP2 handler: update @machine's map groups from the event. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

/* Print a FORK/EXIT event as "(pid:tid):(ppid:ptid)". */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

/* Default FORK handler: create/link the new thread in @machine. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

/* Default EXIT handler: retire the thread in @machine. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

1284 1285
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
1286
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1287 1288 1289
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1290 1291
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
1292 1293
}

/* Print an ITRACE_START event: the pid/tid tracing started for. */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
1329 1330 1331
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
1332
		break;
1333 1334 1335
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
1336 1337 1338
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
1339 1340 1341
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
1342 1343 1344 1345
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
1346 1347 1348 1349 1350 1351 1352
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

/* Generic catch-all handler: let the machine layer dispatch the event. */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

/*
 * Resolve @addr in @thread's address space: fill @al with the machine,
 * thread, cpumode, and the map (of @type) containing the address, and
 * rewrite al->addr to be map-relative.
 *
 * al->level encodes where the address landed:
 *   'k' host kernel, '.' host user, 'g' guest kernel, 'u' guest user,
 *   'H' none of the above (treated as hypervisor / not monitored).
 * In the 'H' case al->map stays NULL and filter bits are set when the
 * sample's guest/host mode does not match what we are monitoring.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	/* No machine context: nothing to search in. */
	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		/* Mark samples from the side (guest/host) we are not monitoring. */
		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			/* Retry once against the kernel maps. */
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		/* Translate to a map-relative address. */
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

1437
void thread__find_addr_location(struct thread *thread,
1438
				u8 cpumode, enum map_type type, u64 addr,
1439
				struct addr_location *al)
1440
{
1441
	thread__find_addr_map(thread, cpumode, type, addr, al);
1442
	if (al->map != NULL)
1443
		al->sym = map__find_symbol(al->map, al->addr);
1444 1445
	else
		al->sym = NULL;
1446 1447
}

/*
 * Resolve a sample to an addr_location: thread, map, symbol, cpu/socket,
 * and the HIST_FILTER__* bits implied by the active thread/dso/symbol
 * filter lists.  Returns 0 on success, -1 if the thread can't be found
 * or created.
 *
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread() -- pair this with addr_location__put().
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	/* Filtered entries are still resolved, just flagged for the UIs. */
	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	/* Map the sample's cpu to its socket via the perf_env topology, if known. */
	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		/* DSO filter: match either the short or the long name. */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * Resolvers such as machine__resolve() return with a reference count
 * held on the thread in the addr_location (and callers may take extra
 * references on entries they keep pointers to).  When done with the
 * addr_location, it must be paired with addr_location__put() so the
 * refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

/*
 * Is @attr the Intel BTS branch-trace event: the hardware
 * branch-instructions counter sampled on every branch (period == 1)?
 *
 * Fix: the original tested (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS),
 * a bitwise AND that also matches any config value sharing a bit with
 * PERF_COUNT_HW_BRANCH_INSTRUCTIONS (e.g. PERF_COUNT_HW_BRANCH_MISSES,
 * PERF_COUNT_HW_BUS_CYCLES).  BTS is exactly the branch-instructions
 * event, so compare config for equality.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

/*
 * Does this event's sample->addr point at executable code, so that it
 * makes sense to resolve it to a symbol?  True for the page-fault
 * software events and for BTS branch records.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE) {
		switch (attr->config) {
		case PERF_COUNT_SW_PAGE_FAULTS:
		case PERF_COUNT_SW_PAGE_FAULTS_MIN:
		case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
			return true;
		default:
			break;
		}
	}

	return is_bts_event(attr);
}

1548 1549
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
1550
{
1551
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1552
	if (!al->map)
1553
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1554 1555 1556 1557 1558 1559
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
1560
		al->sym = map__find_symbol(al->map, al->addr);
1561
}