event.c 35.1 KB
Newer Older
1
#include <linux/types.h>
2
#include <sys/mman.h>
3 4
#include "event.h"
#include "debug.h"
5
#include "hist.h"
6
#include "machine.h"
7
#include "sort.h"
8
#include "string.h"
9
#include "strlist.h"
10
#include "thread.h"
11
#include "thread_map.h"
12
#include "symbol/kallsyms.h"
13 14
#include "asm/bug.h"
#include "stat.h"
15

16
/*
 * Printable names for perf_event record types, indexed by their
 * PERF_RECORD_* id.  Index 0 is used for the aggregate "TOTAL" count.
 * Gaps (NULL entries) are reported as "UNKNOWN" by perf_event__name().
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
};

50
const char *perf_event__name(unsigned int id)
51
{
52
	if (id >= ARRAY_SIZE(perf_event__names))
53
		return "INVALID";
54
	if (!perf_event__names[id])
55
		return "UNKNOWN";
56
	return perf_event__names[id];
57 58
}

59
/*
 * Template sample attached to every synthesized event: all ids are -1
 * ("not available") and the period is 1.
 */
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

68 69
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
72 73
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
74 75
{
	char filename[PATH_MAX];
76 77
	char bf[4096];
	int fd;
78 79
	size_t size = 0;
	ssize_t n;
80 81 82 83
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;
84 85 86

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

87 88
	fd = open(filename, O_RDONLY);
	if (fd < 0) {
89
		pr_debug("couldn't open %s\n", filename);
90
		return -1;
91 92
	}

93 94 95
	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
96
		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
97 98
			   pid);
		return -1;
99
	}
100
	bf[n] = '\0';
101

102 103
	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
104
	ppids = strstr(bf, "PPid:");
105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
127
		*tgid = atoi(tgids);
128 129 130
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}
131

132 133 134 135 136 137 138 139
	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
140 141
}

142 143 144
/*
 * Fill @event with a PERF_RECORD_COMM for @pid without emitting it.
 *
 * On the host, comm/tgid/ppid come from /proc; on a guest machine only
 * the machine's pid is known, so *tgid is set to machine->pid and the
 * comm stays zeroed.
 *
 * Returns 0 on success with *tgid and *ppid filled in, -1 otherwise.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * Shrink the record to the u64-aligned comm length, zeroing the
	 * tail that will carry the sample id header (if any).
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

179
/*
 * Synthesize a PERF_RECORD_COMM for @pid and deliver it via @process.
 * Returns the thread group id on success, -1 on failure.
 */
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid = -1, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) == 0 &&
	    process(tool, event, &synth_sample, machine) == 0)
		return tgid;

	return -1;
}

195
/*
 * Synthesize a PERF_RECORD_FORK for thread @pid of group @tgid and
 * deliver it via @process.  Returns 0 on success, -1 on failure.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	/*
	 * The main thread's parent comes from the status file; every
	 * other thread is reported as a child of the main thread, i.e.
	 * we assume the group leader spawns all threads in a process.
	 */
	pid_t parent = (tgid == pid) ? ppid : tgid;

	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	event->fork.ppid = parent;
	event->fork.ptid = parent;
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	return process(tool, event, &synth_sample, machine) != 0 ? -1 : 0;
}

227 228 229 230 231
/*
 * Synthesize one PERF_RECORD_MMAP2 per line of /proc/<pid>/maps and
 * feed each to @process.  If parsing takes longer than
 * @proc_map_timeout milliseconds, one final event is still emitted
 * with the PROC_MAP_PARSE_TIMEOUT misc flag set and the scan stops.
 *
 * Non-executable maps are skipped unless @mmap_data is set (then
 * readable data maps are reported with the MMAP_DATA misc flag).
 *
 * Returns 0 on success, -1 if the maps file cannot be opened (task
 * exited) or @process fails.
 */
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;

	/* The default guest has no /proc of its own to read. */
	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
			truncation = true;
			/* jumps forward, inside the loop, to emit one
			 * last truncation-flagged event */
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname (7 fields, not 8).
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		/* skip non-executable maps unless data maps were asked for */
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		/* NOTE(review): relies on ->mmap.start aliasing
		 * ->mmap2.start in the union (identical leading layout)
		 * - confirm against event.h */
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

360
/*
 * Synthesize a PERF_RECORD_MMAP for every kernel module in @machine's
 * FUNCTION map group (the main kernel map itself is skipped, it is
 * synthesized separately).  Returns 0 on success, -1 on allocation or
 * delivery failure.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		/* the kernel map proper is not a module */
		if (__map__is_kernel(pos))
			continue;

		/* u64-align the filename and append the sample id space */
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

415 416
/*
 * Synthesize COMM (+ FORK + MMAP2) events for a thread.
 *
 * When @full is zero: emit one COMM plus the MMAP2 events for @pid
 * only.  When @full is non-zero: walk /proc/<pid>/task and emit
 * FORK + COMM for every task, plus MMAP2 events for the group leader.
 *
 * The three event buffers are caller-allocated scratch space so they
 * can be reused across threads.  Returns 0 on success, -1 on failure;
 * an unopenable task directory (task exited) returns 0.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
					  perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid_t _pid;

		/* only numerical entries are task directories */
		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (process(tool, comm_event, &synth_sample, machine) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

492
/*
 * Synthesize COMM and MMAP2 events for every thread in @threads,
 * plus, if a thread's group leader is missing from the map, events
 * for the leader as well (its tgid is needed by consumers).
 * Returns 0 on success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

561
/*
 * Synthesize events for every process visible in <root_dir>/proc.
 * Individual per-thread failures are ignored (the thread may have
 * exited mid-scan); only setup failures return -1.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	/* the default guest has no /proc to enumerate */
	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
620

621 622 623 624 625
/* Closure for find_symbol_cb(): the symbol to find and, on a hit, its address. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};

626
static int find_symbol_cb(void *arg, const char *name, char type,
627
			  u64 start)
628 629 630
{
	struct process_symbol_args *args = arg;

631 632 633 634 635 636
	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
637 638 639 640 641 642
		return 0;

	args->start = start;
	return 1;
}

643 644 645 646 647 648 649 650 651 652 653
/*
 * Look up @symbol_name in a kallsyms file; returns its address, or 0
 * when the file cannot be parsed or the symbol is not present.
 */
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };
	int found = kallsyms__parse(kallsyms_filename, &args, find_symbol_cb);

	return found > 0 ? args.start : 0;
}

654
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
655
				       perf_event__handler_t process,
656
				       struct machine *machine)
657 658
{
	size_t size;
659
	const char *mmap_name;
660
	char name_buff[PATH_MAX];
661
	struct map *map = machine__kernel_map(machine);
662
	struct kmap *kmap;
663
	int err;
664 665
	union perf_event *event;

666
	if (map == NULL)
667 668
		return -1;

669 670 671 672 673
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
674
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
675 676 677 678 679
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}
680

681
	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
682
	if (machine__is_host(machine)) {
683 684 685 686
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
687
		event->header.misc = PERF_RECORD_MISC_KERNEL;
688
	} else {
689
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
690
	}
691

692
	kmap = map__kmap(map);
693
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
694
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
695
	size = PERF_ALIGN(size, sizeof(u64));
696 697
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
698
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
699
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
700 701 702 703
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

704
	err = process(tool, event, &synth_sample, machine);
705 706 707
	free(event);

	return err;
708 709
}

710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
/*
 * Synthesize a PERF_RECORD_THREAD_MAP carrying the pid/comm pair of
 * every entry in @threads and hand it to @process.
 * Returns @process's result, or -ENOMEM.
 */
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	/* header + one variable-length entry per thread */
	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		/* NOTE(review): strncpy may leave entry->comm without a NUL
		 * when comm fills the field - confirm readers bound by
		 * sizeof(entry->comm) */
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876
/* Copy @map into the array form of cpu_map_data. */
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int idx;

	cpus->nr = map->nr;

	for (idx = 0; idx < map->nr; idx++)
		cpus->cpu[idx] = map->map[idx];
}

/* Copy @map into the bitmask form of cpu_map_data, sized for @max bits. */
static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int idx;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (idx = 0; idx < map->nr; idx++)
		set_bit(map->map[idx], mask->mask);
}

/* Bytes needed to hold @map in array (cpu_map_entries) form. */
static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

/*
 * Bytes needed to hold @map in bitmask (cpu_map_mask) form; also
 * returns via @max the number of bits the mask must hold (highest
 * cpu number + 1).
 */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

/*
 * Allocate a zeroed buffer big enough for @map in whichever encoding
 * (array or mask) is smaller; *size is incremented by the payload
 * size, *type records the chosen encoding and *max the highest cpu
 * bit.  Returns the buffer or NULL on allocation failure.
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

/*
 * Serialize @map into @data using the encoding selected earlier by
 * cpu_map_data__alloc() (@type, with @max bits for the mask form).
 */
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;	/* was an implicit fallthrough into default */
	default:
		break;
	}
}

/*
 * Build a PERF_RECORD_CPU_MAP event for @map.  Returns a heap-allocated
 * event (caller frees) or NULL on allocation failure.
 */
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	/* cpu_map_data__alloc() adds the payload size on top of this */
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

/*
 * Synthesize a PERF_RECORD_CPU_MAP for @map and hand it to @process.
 * Returns @process's result, or -ENOMEM.
 */
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int err;
	struct cpu_map_event *event = cpu_map_event__new(map);

	if (event == NULL)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);
	free(event);

	return err;
}

877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG carrying every
 * PERF_STAT_CONFIG_TERM__* tag/value pair from @config and hand it to
 * @process.  Returns @process's result, or -ENOMEM.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

/* append one tag/value pair to the event */
#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936
/*
 * Synthesize a PERF_RECORD_STAT with one counter reading for
 * (@cpu, @thread, @id) and hand it to @process.
 */
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event = {
		.header = {
			.type = PERF_RECORD_STAT,
			.size = sizeof(event),
			.misc = 0,
		},
		.id     = id,
		.cpu    = cpu,
		.thread = thread,
		.val    = count->val,
		.ena    = count->ena,
		.run    = count->run,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953
/*
 * Synthesize a PERF_RECORD_STAT_ROUND marker (@type at @evtime) and
 * hand it to @process.
 */
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event = {
		.header = {
			.type = PERF_RECORD_STAT_ROUND,
			.size = sizeof(event),
			.misc = 0,
		},
		.time = evtime,
		.type = type,
	};

	return process(tool, (union perf_event *) &event, NULL, machine);
}

954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
/*
 * Apply the tag/value pairs of a PERF_RECORD_STAT_CONFIG event to
 * @config.  Unrecognized tags are warned about and skipped.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

978 979
/* Print a COMM event as ": comm:pid/tid" (with " exec" when flagged). */
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *exec_str =
		(event->header.misc & PERF_RECORD_MISC_COMM_EXEC) ? " exec" : "";

	return fprintf(fp, "%s: %s:%d/%d\n", exec_str, event->comm.comm,
		       event->comm.pid, event->comm.tid);
}

990
/* Default COMM handler: feed the event into the machine state. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

998
/* Default LOST handler: feed the event into the machine state. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
1005

1006 1007 1008 1009 1010 1011 1012 1013
/* Default AUX handler: feed the event into the machine state. */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

1014 1015 1016 1017 1018 1019 1020 1021
/* Default ITRACE_START handler: feed the event into the machine state. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

1022 1023 1024 1025 1026 1027 1028 1029
/* Default LOST_SAMPLES handler: feed the event into the machine state. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

1030 1031 1032 1033 1034 1035 1036 1037
/* Default SWITCH/SWITCH_CPU_WIDE handler: feed the event into the machine state. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

1038 1039
/* Print an MMAP event: pid/tid, [start(len) @ pgoff], r/x flag, filename. */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

1047 1048 1049
/* Print an MMAP2 event incl. device, inode and rwxp protection flags. */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077
/* Print the thread map carried by a PERF_RECORD_THREAD_MAP event. */
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t printed = fprintf(fp, " nr: ");

	if (threads == NULL)
		printed += fprintf(fp, "failed to get threads from event\n");
	else
		printed += thread_map__fprintf(threads, fp);

	thread_map__put(threads);
	return printed;
}

1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093
/* Print the cpu map carried by a PERF_RECORD_CPU_MAP event. */
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t printed = fprintf(fp, " nr: ");

	if (cpus == NULL)
		printed += fprintf(fp, "failed to get cpumap from event\n");
	else
		printed += cpu_map__fprintf(cpus, fp);

	cpu_map__put(cpus);
	return printed;
}

1094
/* Default MMAP handler: feed the event into the machine state. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

1102 1103
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
1104
			     struct perf_sample *sample,
1105 1106
			     struct machine *machine)
{
1107
	return machine__process_mmap2_event(machine, event, sample);
1108 1109
}

1110 1111 1112 1113 1114 1115 1116
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

1117
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1118
			     union perf_event *event,
1119
			     struct perf_sample *sample,
1120
			     struct machine *machine)
1121
{
1122
	return machine__process_fork_event(machine, event, sample);
1123
}
1124

1125 1126
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
1127
			     struct perf_sample *sample,
1128 1129
			     struct machine *machine)
{
1130
	return machine__process_exit_event(machine, event, sample);
1131 1132
}

1133 1134 1135 1136 1137 1138 1139 1140 1141
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

1142 1143 1144 1145 1146 1147
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
1178 1179 1180
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
1181 1182 1183
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
1184 1185 1186
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
1187 1188 1189 1190
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
1191 1192 1193 1194 1195 1196 1197
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

1198 1199
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
1200
			struct perf_sample *sample,
1201
			struct machine *machine)
1202
{
1203
	return machine__process_event(machine, event, sample);
1204 1205
}

/*
 * thread__find_addr_map - resolve @addr to a map within @thread.
 *
 * Selects the map group implied by @cpumode (host/guest x kernel/user),
 * looks @addr up in it and fills @al.  On a hit, al->addr is rewritten to
 * the map-relative address; on a miss al->map is left NULL.  Samples from
 * modes the current session cannot resolve are marked in al->filtered
 * rather than resolved.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	/* No machine: nothing to look the address up in. */
	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/*
		 * A mode this session is not set up to resolve (e.g. guest
		 * samples without perf_guest): mark it 'H' and record why it
		 * is being filtered instead of resolving it.
		 */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		/* Translate to the map-relative address. */
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

1282
void thread__find_addr_location(struct thread *thread,
1283
				u8 cpumode, enum map_type type, u64 addr,
1284
				struct addr_location *al)
1285
{
1286
	thread__find_addr_map(thread, cpumode, type, addr, al);
1287
	if (al->map != NULL)
1288
		al->sym = map__find_symbol(al->map, al->addr,
1289
					   thread->mg->machine->symbol_filter);
1290 1291
	else
		al->sym = NULL;
1292 1293
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread(), e.g. via addr_location__put().
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	/* Resolve the sampled IP to a map (fills al->map/al->addr/al->level). */
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	/* Map the sampled CPU to its socket id when CPU topology is available. */
	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		/*
		 * DSO-list filter: keep the sample only if the DSO's short
		 * name, or its long name (when it differs), is in the list.
		 */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	/* Symbol-list filter: mark samples whose symbol is not in the list. */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The resolve/preprocess-sample helpers above return with reference counts
 * held on the entries in the addr_location; when done using it (and perhaps
 * after getting extra references, if a pointer to one of those entries must
 * be kept) it must be paired with addr_location__put(), so that the
 * refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	/* Drops the thread reference taken by machine__findnew_thread(). */
	thread__zput(al->thread);
}

/*
 * is_bts_event - does @attr describe an Intel BTS branch-trace event?
 *
 * BTS is programmed as the hardware branch-instructions event with a
 * sample period of 1.  PERF_COUNT_HW_BRANCH_INSTRUCTIONS is an enum
 * *value* (4), not a bit flag, so it must be compared with '==':
 * using '&' would also match branch-misses (5), bus-cycles (6) and
 * stalled-cycles-frontend (7).
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

/*
 * sample_addr_correlates_sym - can sample->addr be resolved to a symbol
 * for this event type?  True for the software page-fault events and for
 * BTS, where the recorded address points into mapped code/data.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE) {
		switch (attr->config) {
		case PERF_COUNT_SW_PAGE_FAULTS:
		case PERF_COUNT_SW_PAGE_FAULTS_MIN:
		case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
			return true;
		default:
			break;
		}
	}

	return is_bts_event(attr);
}

1395 1396
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
1397
{
1398
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1399
	if (!al->map)
1400
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1401 1402 1403 1404 1405 1406 1407 1408
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}