#include <linux/types.h>
#include <linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
};

static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}
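
/*
 * Usage sketch for perf_event__name() above (illustrative, not part of the
 * original file): mapping a record type to a printable name, e.g. when
 * dumping a raw event stream.
 *
 *	const char *name = perf_event__name(event->header.type);
 *
 * Out-of-range ids come back as "INVALID" and ids with no entry in
 * perf_event__names[] come back as "UNKNOWN", so the result is always
 * printable.
 */
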
static const char *perf_ns__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_ns__names))
		return "UNKNOWN";
	return perf_ns__names[id];
}

static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}
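
/*
 * Illustrative excerpt (values made up) of the /proc/<pid>/status buffer
 * parsed by perf_event__get_comm_ids() above:
 *
 *	Name:	cat
 *	Tgid:	4242
 *	Pid:	4242
 *	PPid:	4000
 *
 * strstr() locates the "Name:", "Tgid:" and "PPid:" prefixes anywhere in
 * the buffer, and atoi() skips the whitespace after each colon, so the
 * exact field order and spacing don't matter.
 */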

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}
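
/*
 * Worked example (illustrative) for the header.size arithmetic in
 * perf_event__prepare_comm() above: PERF_ALIGN(size, sizeof(u64)) rounds
 * up to the next multiple of 8, so a comm of "bash" (4 chars + NUL = 5)
 * gives size = 8, and header.size then trims the unused tail of the
 * fixed-width event->comm.comm[] array, keeping only those 8 bytes plus
 * the sample id header.
 */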

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to ppid from the status file.
	 * For other threads, set the parent pid to the main thread, i.e.
	 * assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}
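
/*
 * Illustrative decode (not in the original) of the prot/flags handling in
 * perf_event__synthesize_mmap_events() above: a maps line with prot "r-xp"
 * yields
 *
 *	event->mmap2.prot  = PROT_READ | PROT_EXEC;
 *	event->mmap2.flags = MAP_PRIVATE;
 *
 * while "rw-s" yields PROT_READ | PROT_WRITE with MAP_SHARED and, not
 * being executable, is skipped unless mmap_data was requested.
 */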

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}
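
/*
 * Usage sketch for kallsyms__get_function_start() above (illustrative, not
 * part of the original file): finding where the kernel text begins:
 *
 *	u64 start = kallsyms__get_function_start("/proc/kallsyms", "_text");
 *
 * A return value of 0 means the symbol was not found.
 */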

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}
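
/*
 * Worked example (illustrative) for cpu_map_data__alloc() above: for a map
 * of cpus {0, 1, 2, 3}, cpus_size() is sizeof(struct cpu_map_entries) +
 * 4 * sizeof(u16), and mask_size() is sizeof(struct cpu_map_mask) +
 * 1 * sizeof(long) since BITS_TO_LONGS(4) == 1; whichever is smaller
 * picks the encoding. A dummy (empty) map always gets the array encoding.
 */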

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event *cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
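
/*
 * Illustrative expansion (not in the original) of the ADD() macro above:
 * ADD(SCALE, config->scale) becomes
 *
 *	event->data[i].tag = PERF_STAT_CONFIG_TERM__SCALE;
 *	event->data[i].val = config->scale;
 *	i++;
 *
 * so the WARN_ONCE() catches any PERF_STAT_CONFIG_TERM__* enum value that
 * was added without a matching ADD() here.
 */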

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id        = id;
	event.cpu       = cpu;
	event.thread    = thread;
	event.val       = count->val;
	event.ena       = count->ena;
	event.run       = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}
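
/*
 * Illustrative summary (not in the original) of the cpumode -> al->level
 * mapping established in thread__find_addr_map() above:
 *
 *	PERF_RECORD_MISC_KERNEL       -> 'k'  host kernel maps
 *	PERF_RECORD_MISC_USER         -> '.'  host user maps
 *	PERF_RECORD_MISC_GUEST_KERNEL -> 'g'  guest kernel maps
 *	PERF_RECORD_MISC_GUEST_USER   -> 'u'  guest user maps
 *	anything else                 -> 'H'  hypervisor, no map lookup
 */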

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * machine__resolve() returns with reference counts for the entries in the
 * addr_location; when done using them (and perhaps after getting ref counts
 * of your own, if keeping a pointer to one of those entries), it must be
 * paired with addr_location__put(), so that the refcounts can be
 * decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}
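
/*
 * Usage sketch for sample_addr_correlates_sym() above (illustrative; the
 * evsel variable is hypothetical): callers typically gate addr resolution
 * on this test before paying for a map and symbol lookup:
 *
 *	if (sample_addr_correlates_sym(&evsel->attr))
 *		thread__resolve(thread, &addr_al, sample);
 */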

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}