event.c 34.8 KB
Newer Older
1
#include <linux/types.h>
2
#include <sys/mman.h>
3 4
#include "event.h"
#include "debug.h"
5
#include "hist.h"
6
#include "machine.h"
7
#include "sort.h"
8
#include "string.h"
9
#include "strlist.h"
10
#include "thread.h"
11
#include "thread_map.h"
12
#include "symbol/kallsyms.h"
13 14
#include "asm/bug.h"
#include "stat.h"
15

16
static const char *perf_event__names[] = {
17 18
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
19
	[PERF_RECORD_MMAP2]			= "MMAP2",
20 21 22 23 24 25 26 27
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
28
	[PERF_RECORD_AUX]			= "AUX",
29
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
30
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
31 32
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
33 34 35 36 37
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
A
Adrian Hunter 已提交
38
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
39 40
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
41
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
42
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
43
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
44
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
J
Jiri Olsa 已提交
45
	[PERF_RECORD_STAT]			= "STAT",
46
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
47 48
};

49
const char *perf_event__name(unsigned int id)
50
{
51
	if (id >= ARRAY_SIZE(perf_event__names))
52
		return "INVALID";
53
	if (!perf_event__names[id])
54
		return "UNKNOWN";
55
	return perf_event__names[id];
56 57
}

58
static struct perf_sample synth_sample = {
59 60 61 62 63 64 65 66
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

67 68
/*
 * Assumes that the first 4095 bytes of /proc/pid/stat contains
69
 * the comm, tgid and ppid.
70
 */
71 72
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
73 74
{
	char filename[PATH_MAX];
75 76
	char bf[4096];
	int fd;
77 78
	size_t size = 0;
	ssize_t n;
79 80 81 82
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;
83 84 85

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

86 87
	fd = open(filename, O_RDONLY);
	if (fd < 0) {
88
		pr_debug("couldn't open %s\n", filename);
89
		return -1;
90 91
	}

92 93 94
	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
95
		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
96 97
			   pid);
		return -1;
98
	}
99
	bf[n] = '\0';
100

101 102
	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
103
	ppids = strstr(bf, "PPid:");
104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
126
		*tgid = atoi(tgids);
127 128 129
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}
130

131 132 133 134 135 136 137 138
	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
139 140
}

141 142 143
/*
 * Fill in a PERF_RECORD_COMM event for @pid without emitting it.
 * For host machines the comm/tgid/ppid come from /proc; for guest
 * machines only the machine pid is available, so *ppid stays -1.
 *
 * Returns 0 on success, -1 if the ids could not be determined.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * Shrink the record to the actual comm length (u64-aligned) and
	 * zero the trailing id-header area that the consumer may read.
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

178
/*
 * Synthesize and emit a PERF_RECORD_COMM event for @pid.
 *
 * Returns the thread group id of @pid on success, -1 if preparing or
 * delivering the event failed.
 */
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return tgid;
}

194
/*
 * Synthesize and emit a PERF_RECORD_FORK event describing how @pid was
 * created, so tools replaying the stream can build the thread tree.
 *
 * Returns 0 on success, -1 if the process callback failed.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * for main thread set parent to ppid from status file. For other
	 * threads set parent pid to main thread. ie., assume main thread
	 * spawns all threads in a process
	*/
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return 0;
}

226 227 228 229 230
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
231 232
				       bool mmap_data,
				       unsigned int proc_map_timeout)
233 234 235
{
	char filename[PATH_MAX];
	FILE *fp;
236 237
	unsigned long long t;
	bool truncation = false;
238
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
239
	int rc = 0;
240

241 242 243
	if (machine__is_default_guest(machine))
		return 0;

244 245
	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);
246 247 248 249 250 251 252 253 254 255

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

256
	event->header.type = PERF_RECORD_MMAP2;
257
	t = rdclock();
258

259
	while (1) {
260 261 262 263
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
264
		unsigned int ino;
265
		size_t size;
266
		ssize_t n;
267

268 269 270
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

271 272 273 274 275
		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
276 277 278 279
			truncation = true;
			goto out;
		}

280 281 282
		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

283
		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
284 285 286 287 288 289
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

290 291 292
		/*
 		 * Anon maps don't have the execname.
 		 */
293
		if (n < 7)
294
			continue;
295 296 297

		event->mmap2.ino = (u64)ino;

298 299 300
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
301 302 303 304
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
305

306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

321 322 323 324 325 326
		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}
327

328 329 330 331
out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

332 333 334 335
		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
336
		memcpy(event->mmap2.filename, execname, size);
337
		size = PERF_ALIGN(size, sizeof(u64));
338 339 340 341 342 343 344
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;
345 346 347 348

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
349
		}
350 351 352

		if (truncation)
			break;
353 354 355
	}

	fclose(fp);
356
	return rc;
357 358
}

359
int perf_event__synthesize_modules(struct perf_tool *tool,
360
				   perf_event__handler_t process,
361
				   struct machine *machine)
362
{
363
	int rc = 0;
364
	struct map *pos;
365
	struct map_groups *kmaps = &machine->kmaps;
366
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
367
	union perf_event *event = zalloc((sizeof(event->mmap) +
368
					  machine->id_hdr_size));
369 370 371 372 373 374 375
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
376

377 378 379 380
	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
381
	if (machine__is_host(machine))
382
		event->header.misc = PERF_RECORD_MISC_KERNEL;
383
	else
384
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
385

386
	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
387 388
		size_t size;

389
		if (__map__is_kernel(pos))
390 391
			continue;

392
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
393 394 395
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
396 397
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
398 399 400 401 402
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
403
		       pos->dso->long_name_len + 1);
404 405 406 407
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
408 409
	}

410
	free(event);
411
	return rc;
412 413
}

414 415
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
416
				      union perf_event *fork_event,
417 418
				      pid_t pid, int full,
					  perf_event__handler_t process,
419
				      struct perf_tool *tool,
420 421 422
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
423
{
424 425 426
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
427
	pid_t tgid, ppid;
428
	int rc = 0;
429 430 431 432 433 434 435 436 437 438

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
439 440
							  process, machine, mmap_data,
							  proc_map_timeout);
441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

463
		rc = -1;
464 465
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
466
			break;
467

468
		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
469
						ppid, process, machine) < 0)
470
			break;
471 472 473 474
		/*
		 * Send the prepared comm event
		 */
		if (process(tool, comm_event, &synth_sample, machine) != 0)
475
			break;
476

477
		rc = 0;
478 479 480
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
481
						process, machine, mmap_data, proc_map_timeout);
482 483
			if (rc)
				break;
484
		}
485 486 487
	}

	closedir(tasks);
488
	return rc;
489 490
}

491
int perf_event__synthesize_thread_map(struct perf_tool *tool,
492
				      struct thread_map *threads,
493
				      perf_event__handler_t process,
494
				      struct machine *machine,
495 496
				      bool mmap_data,
				      unsigned int proc_map_timeout)
497
{
498
	union perf_event *comm_event, *mmap_event, *fork_event;
499
	int err = -1, thread, j;
500

501
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
502 503 504
	if (comm_event == NULL)
		goto out;

505
	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
506 507 508
	if (mmap_event == NULL)
		goto out_free_comm;

509 510 511 512
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

513 514 515
	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
516
					       fork_event,
517
					       thread_map__pid(threads, thread), 0,
518
					       process, tool, machine,
519
					       mmap_data, proc_map_timeout)) {
520 521 522
			err = -1;
			break;
		}
523 524 525 526 527

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
528
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
529 530 531 532
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
533
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
534 535 536 537 538 539 540
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
541
			    __event__synthesize_thread(comm_event, mmap_event,
542
						       fork_event,
543 544
						       comm_event->comm.pid, 0,
						       process, tool, machine,
545
						       mmap_data, proc_map_timeout)) {
546 547 548 549
				err = -1;
				break;
			}
		}
550
	}
551 552
	free(fork_event);
out_free_mmap:
553 554 555 556 557 558 559
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

560
int perf_event__synthesize_threads(struct perf_tool *tool,
561
				   perf_event__handler_t process,
562 563 564
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
565 566
{
	DIR *proc;
567
	char proc_path[PATH_MAX];
568
	struct dirent dirent, *next;
569
	union perf_event *comm_event, *mmap_event, *fork_event;
570 571
	int err = -1;

572 573 574
	if (machine__is_default_guest(machine))
		return 0;

575
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
576 577 578
	if (comm_event == NULL)
		goto out;

579
	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
580 581
	if (mmap_event == NULL)
		goto out_free_comm;
582

583 584 585 586
	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

587 588 589
	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

590
	if (proc == NULL)
591
		goto out_free_fork;
592 593 594 595 596 597 598

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
599 600 601 602
		/*
 		 * We may race with exiting thread, so don't stop just because
 		 * one thread couldn't be synthesized.
 		 */
603
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
604 605
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
606 607
	}

608
	err = 0;
609
	closedir(proc);
610 611
out_free_fork:
	free(fork_event);
612 613 614 615 616 617
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
618
}
619

620 621 622 623 624
/* Search state for kallsyms__parse(): name to find, address found. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};

/*
 * kallsyms__parse() callback: record the address of the symbol named
 * in @arg.  Returning 1 stops the parse at the first match.
 */
static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

642 643 644 645 646 647 648 649 650 651 652
/*
 * Look up the address of @symbol_name in the given kallsyms file.
 * Returns 0 if the file could not be parsed or the symbol was not
 * found (note: 0 is also a legal-looking address; callers treat it as
 * "not found").
 */
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

653
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
654
				       perf_event__handler_t process,
655
				       struct machine *machine)
656 657
{
	size_t size;
658
	const char *mmap_name;
659
	char name_buff[PATH_MAX];
660
	struct map *map = machine__kernel_map(machine);
661
	struct kmap *kmap;
662
	int err;
663 664
	union perf_event *event;

665
	if (map == NULL)
666 667
		return -1;

668 669 670 671 672
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
673
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
674 675 676 677 678
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}
679

680
	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
681
	if (machine__is_host(machine)) {
682 683 684 685
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
686
		event->header.misc = PERF_RECORD_MISC_KERNEL;
687
	} else {
688
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
689
	}
690

691
	kmap = map__kmap(map);
692
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
693
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
694
	size = PERF_ALIGN(size, sizeof(u64));
695 696
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
697
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
698
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
699 700 701 702
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

703
	err = process(tool, event, &synth_sample, machine);
704 705 706
	free(event);

	return err;
707 708
}

709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744
/*
 * Synthesize a PERF_RECORD_THREAD_MAP event carrying the pid and comm
 * of every entry in @threads.
 *
 * Returns the process callback's result, or -ENOMEM.
 */
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size +=	threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		/*
		 * Copy at most size-1 bytes: the event was zalloc'ed, so
		 * leaving the last byte untouched guarantees NUL
		 * termination even when comm fills the field.
		 */
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm) - 1);
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875
/* Fill a cpu_map_entries payload with the cpu numbers from @map. */
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

/* Fill a cpu_map_mask payload: one bit per cpu in @map, @max bits total. */
static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

/* Payload size of the array representation of @map. */
static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

/* Payload size of the mask representation; also returns the highest bit. */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

/*
 * Allocate a zeroed buffer big enough for @map in whichever
 * representation (array or mask) is smaller, growing *size by the
 * chosen payload.  *type reports the chosen representation and *max
 * the highest cpu bit (for the mask case).
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

/* Serialize @map into @data using the representation chosen above. */
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

/* Build a freshly-allocated PERF_RECORD_CPU_MAP event for @map. */
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

/*
 * Synthesize and emit a PERF_RECORD_CPU_MAP event describing @map.
 * Returns the process callback's result, or -ENOMEM.
 */
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event carrying the stat session
 * configuration (aggregation mode, interval, scale) as tag/value pairs,
 * so a later report can reconstruct how the counts were collected.
 *
 * Returns the process callback's result, or -ENOMEM.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	/* header plus one tag/value slot per config term */
	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935
/*
 * Synthesize a PERF_RECORD_STAT event with one counter reading
 * (value/enabled/running) for the given cpu, thread and event id.
 * The event fits on the stack, so no allocation is needed.
 *
 * Returns the process callback's result.
 */
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id        = id;
	event.cpu       = cpu;
	event.thread    = thread;
	event.val       = count->val;
	event.ena       = count->ena;
	event.run       = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959
/*
 * Decode a PERF_RECORD_STAT_CONFIG event back into @config — the
 * inverse of perf_event__synthesize_stat_config().  Unknown tags are
 * warned about and skipped, so newer producers stay compatible.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

960 961
/*
 * Print a COMM event as "[ exec]: comm:pid/tid"; the " exec" marker is
 * shown when the record came from an exec (MISC_COMM_EXEC set).
 * Returns the number of characters printed.
 */
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

972
/*
 * Default per-record-type tool callbacks: each simply forwards the
 * event to the corresponding machine__process_*() handler.
 */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

1020 1021
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
1022
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1023
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1024 1025 1026
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
1027 1028
}

1029 1030 1031
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1032
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1033 1034 1035 1036
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
1037 1038 1039 1040
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1041 1042 1043
		       event->mmap2.filename);
}

1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059
/*
 * Print a THREAD_MAP event by rebuilding a thread_map from it; falls
 * back to an error note if reconstruction fails.
 * Returns the number of characters printed.
 */
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

/*
 * Print a CPU_MAP event by rebuilding a cpu_map from it; falls back to
 * an error note if reconstruction fails.
 * Returns the number of characters printed.
 */
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

1076
/* Default MMAP callback: forward to the machine handler. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

/* Default MMAP2 callback: forward to the machine handler. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

1092 1093 1094 1095 1096 1097 1098
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

1099
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1100
			     union perf_event *event,
1101
			     struct perf_sample *sample,
1102
			     struct machine *machine)
1103
{
1104
	return machine__process_fork_event(machine, event, sample);
1105
}
1106

/*
 * Stock ->exit callback: forwards the EXIT event straight to
 * machine__process_exit_event(). @tool is unused.
 */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

1115 1116 1117 1118 1119 1120 1121 1122 1123
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

1124 1125 1126 1127 1128 1129
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
1160 1161 1162
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
1163 1164 1165
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
1166 1167 1168
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
1169 1170 1171 1172
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
1173 1174 1175 1176 1177 1178 1179
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

/*
 * Catch-all event callback: forwards any event to machine__process_event().
 * @tool is unused.
 */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

/*
 * Resolve @addr for @thread to the map containing it, filling in @al
 * (machine, thread, addr, cpumode, level, map, filtered). On success
 * al->addr is rewritten to a map-relative address via al->map->map_ip();
 * al->map is left NULL when no map contains the address.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;	/* set when resolving against kernel maps */

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	/* No machine to look the address up in. */
	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;	/* kernel addresses resolve in kmaps */
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* Hypervisor samples, or modes filtered out below. */
		al->level = 'H';
		al->map = NULL;

		/* Guest samples while not profiling a guest: filter. */
		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		/* Host samples while not profiling the host: filter. */
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;	/* retry once against kernel maps */
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		/* Translate to a map-relative address. */
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

1264
void thread__find_addr_location(struct thread *thread,
1265
				u8 cpumode, enum map_type type, u64 addr,
1266
				struct addr_location *al)
1267
{
1268
	thread__find_addr_map(thread, cpumode, type, addr, al);
1269
	if (al->map != NULL)
1270
		al->sym = map__find_symbol(al->map, al->addr,
1271
					   thread->mg->machine->symbol_filter);
1272 1273
	else
		al->sym = NULL;
1274 1275
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
/*
 * Resolve a sample into an addr_location: find (or create) the thread,
 * resolve the sample IP to map/symbol, record cpu/socket, and accumulate
 * HIST_FILTER__* bits for thread/dso/symbol list filters.
 * Returns 0 on success, -1 when the thread cannot be found or created.
 */
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;	/* unknown unless the env has per-cpu topology */

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		/*
		 * Filter out samples whose dso matches neither the short nor
		 * the long name on the user-supplied dso list.
		 */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	/* Same treatment for a user-supplied symbol name list. */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
1347

/*
 * The preprocess_sample method returns with reference counts held on the
 * entries in the addr_location. When done using it (and perhaps after
 * getting extra ref counts, if needing to keep a pointer to one of those
 * entries) it must be paired with addr_location__put(), so that the
 * refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	/* Drops the thread reference taken by machine__findnew_thread(). */
	thread__zput(al->thread);
}

/*
 * Intel BTS events are opened as the generic hardware branch-instructions
 * event with a sample period of 1.
 *
 * Fixed: the old test used a bitwise AND against the enum value
 * (PERF_COUNT_HW_BRANCH_INSTRUCTIONS == 4), which also matched any other
 * hardware config sharing bit 2, e.g. PERF_COUNT_HW_BRANCH_MISSES (5) or
 * PERF_COUNT_HW_BUS_CYCLES (6). An exact comparison is intended.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

/*
 * True when this event type's sample->addr can meaningfully be resolved
 * to a symbol: page-fault software events and BTS branch events.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (is_bts_event(attr))
		return true;

	if (attr->type != PERF_TYPE_SOFTWARE)
		return false;

	switch (attr->config) {
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
		return true;
	default:
		return false;
	}
}

void perf_event__preprocess_sample_addr(union perf_event *event,
					struct perf_sample *sample,
					struct thread *thread,
					struct addr_location *al)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

1387
	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
1388
	if (!al->map)
1389
		thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
1390 1391 1392 1393 1394 1395 1396 1397
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}