#include <linux/types.h>
#include <sys/mman.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
};

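/*
 * Return a printable name for a PERF_RECORD_* id: "INVALID" for ids
 * beyond the table above, "UNKNOWN" for holes in it.
 */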
const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

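/*
 * Synthesized events do not come with a real sample, so hand the tool's
 * callback a mostly-empty one, keeping only the cpumode from the event
 * header.
 */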
static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
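/*
 * For reference, the fields parsed below look like this in
 * /proc/<pid>/status (values are illustrative):
 *
 *	Name:	bash
 *	Tgid:	1234
 *	PPid:	1000
 */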
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

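/*
 * Prepare a PERF_RECORD_COMM event for pid.  On the host the comm, tgid
 * and ppid are read from /proc; for a guest machine only the tgid
 * (machine->pid) is available.
 */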
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file.  For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

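/*
 * Synthesize one PERF_RECORD_MMAP2 event per executable (or, with
 * mmap_data, readable) map in /proc/<pid>/maps.  Parsing is bounded by
 * proc_map_timeout (milliseconds): when it expires the current event is
 * flagged with PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT and the scan stops.
 */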
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

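/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module, skipping the
 * kernel map itself, which perf_event__synthesize_kernel_mmap() handles.
 */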
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

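/*
 * Synthesize COMM/FORK/MMAP2 events for one process.  With full == 0 only
 * the passed-in pid gets a COMM event plus its maps; otherwise every task
 * in /proc/<pid>/task gets FORK and COMM events, and the maps are
 * synthesized when the group leader (_pid == pid) is reached.
 */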
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

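/*
 * Synthesize events for every thread in @threads.  If a thread's group
 * leader is not in the map, events for the leader are synthesized too
 * (see the need_leader logic below), so the tgid recorded in comm.pid can
 * be resolved later.
 */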
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

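/*
 * System-wide variant: walk /proc and synthesize events for every task
 * found there.  Races with exiting threads are expected and are not
 * treated as errors.
 */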
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

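/*
 * Helper for scanning kallsyms: find_symbol_cb() stops the parse at the
 * first function (or alias) symbol matching ->name and records its start
 * address.
 */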
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

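/* Returns the symbol's start address, or 0 if it was not found. */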
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

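/*
 * Synthesize a PERF_RECORD_MMAP for the kernel map, using the
 * ref_reloc_sym (e.g. "_text") as the pgoff so that the map can be
 * relocated later.  Fails early when kptr_restrict hides kernel
 * addresses.
 */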
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until
	 * that is available use this, and after it is use this as a
	 * fallback for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

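/*
 * Synthesize the thread map itself (one pid/comm pair per entry) as a
 * PERF_RECORD_THREAD_MAP event, so that later processing can recreate it
 * without access to /proc.
 */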
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is its id + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

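/*
 * Worked example (illustrative): for a map holding cpus {0, 4}, the
 * array encoding needs two u16 entries, while the mask encoding sets
 * *max to 5 (cpu 4 occupies bit 5) and needs BITS_TO_LONGS(5) == 1
 * long of mask bits; cpu_map_data__alloc() below picks whichever total
 * is smaller.
 */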
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event *cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

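/*
 * PERF_RECORD_STAT_CONFIG carries the perf stat configuration as
 * tag/value pairs; perf_event__read_stat_config() below is the inverse
 * operation.
 */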
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

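/* Synthesize one PERF_RECORD_STAT event for a single counter reading. */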
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id        = id;
	event.cpu       = cpu;
	event.thread    = thread;
	event.val       = count->val;
	event.ena       = count->ena;
	event.run       = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

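/*
 * Synthesize a PERF_RECORD_STAT_ROUND event carrying the timestamp and
 * type of one completed round of readings.
 */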
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

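/*
 * The perf_event__fprintf_*() helpers below pretty-print individual
 * events, e.g. for 'perf report -D' style raw dumps.
 */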
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

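/*
 * Resolve @addr within @thread's address space.  al->level records where
 * it resolved: 'k' host kernel, '.' host user, 'g' guest kernel, 'u'
 * guest user, 'H' neither (e.g. hypervisor samples).
 */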
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now let's use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The preprocess_sample method will return with reference counts for the
 * entries in it; when done using them (and perhaps after taking extra
 * references to keep a pointer to one of those entries), this must be
 * paired with addr_location__put(), so that the refcounts can be
 * decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

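/*
 * sample_addr_correlates_sym() reports whether sample->addr for events of
 * the given attr can be resolved to a symbol: true for page-fault
 * software events and for BTS branch samples.
 */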
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}