#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]	 = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]	 = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	 = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]	 = "BUILD_ID",
};

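/*
 * Synthesize a PERF_RECORD_COMM event for an already running task by
 * parsing the "Name:" and "Tgid:" lines of /proc/<pid>/status.  With
 * 'full' set, one event is emitted per thread found in /proc/<pid>/task.
 * Returns the tgid on success, 0 if we raced with the task exiting and
 * -1 if the status file was malformed.
 */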
static pid_t event__synthesize_comm(pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	event_t ev;
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&ev.comm, 0, sizeof(ev.comm));
	while (!ev.comm.comm[0] || !ev.comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL)
			goto out_failure;

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(ev.comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = ev.comm.pid = atoi(tgids);
		}
	}

	ev.comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);

	if (!full) {
		ev.comm.tid = pid;

		process(&ev, session);
		goto out_fclose;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		ev.comm.tid = pid;

		process(&ev, session);
	}
	closedir(tasks);

out_fclose:
	fclose(fp);
	return tgid;

out_failure:
	pr_warning("couldn't get COMM and tgid, malformed %s\n", filename);
	return -1;
}

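/*
 * Synthesize PERF_RECORD_MMAP events for the executable mappings of an
 * already running task by parsing /proc/<pid>/maps.  Only 'x' (vm_exec)
 * entries backed by a file, plus the [vdso] pseudo entry, are reported.
 */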
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		event_t ev = {
			.header = {
				.type = PERF_RECORD_MMAP,
				/*
				 * Just like the kernel, see __perf_event_mmap
				 * in kernel/perf_event.c
				 */
				.misc = PERF_RECORD_MISC_USER,
			 },
		};
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &ev.mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &ev.mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			u64 vm_pgoff;
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &vm_pgoff);
			/*
			 * The offset field in /proc/<pid>/maps is already in
			 * bytes, just like the pgoff the kernel fills in for
			 * its own PERF_RECORD_MMAP events, so use it as is.
			 */
			if (n >= 0)
				ev.mmap.pgoff = vm_pgoff;
			else
				ev.mmap.pgoff = 0;

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(ev.mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
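			/* ev.mmap.len was parsed as the end address, turn it into a length */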
			ev.mmap.len -= ev.mmap.start;
			ev.mmap.header.size = (sizeof(ev.mmap) -
					       (sizeof(ev.mmap.filename) - size));
			ev.mmap.pid = tgid;
			ev.mmap.tid = pid;

			process(&ev, session);
		}
	}

	fclose(fp);
	return 0;
}

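/*
 * Synthesize one PERF_RECORD_MMAP event per module in the machine's
 * MAP__FUNCTION kernel maps.  The kernel map itself (pos->dso->kernel)
 * is skipped here; it is synthesized separately by
 * event__synthesize_kernel_mmap().
 */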
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	u16 misc;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		misc = PERF_RECORD_MISC_KERNEL;
	else
		misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		event_t ev;
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				        (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len   = pos->end - pos->start;
		ev.mmap.pid   = machine->pid;

		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(&ev, session);
	}

	return 0;
}

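/*
 * Convenience wrappers: event__synthesize_thread() emits the COMM event(s)
 * and then the MMAP events for one pid, while event__synthesize_threads()
 * below does it for every numeric entry in /proc.  A typical caller (just a
 * sketch, the handler name here is an example) looks like:
 *
 *	static int write_event(event_t *ev, struct perf_session *s);
 *
 *	event__synthesize_threads(write_event, session);
 *
 * where the handler writes each synthesized event to the perf.data output.
 */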
int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(pid, tgid, process, session);
}

void event__synthesize_threads(event__handler_t process,
			       struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;

	proc = opendir("/proc");
	if (proc == NULL)
		return;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		event__synthesize_thread(pid, process, session);
	}

	closedir(proc);
}

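/*
 * kallsyms__parse() callback used below to look up the address of a single
 * reference symbol (e.g. "_text") when synthesizing the kernel MMAP event.
 */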
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

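/*
 * Synthesize the PERF_RECORD_MMAP event describing the kernel text mapping.
 * The address of 'symbol_name' (typically "_text") is looked up in kallsyms
 * and stored in ->pgoff, so that the reference relocation symbol machinery
 * can later detect a relocated kernel.
 */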
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;

	event_t ev = {
		.header = {
			.type = PERF_RECORD_MMAP,
		},
	};
	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use kallsyms, and once it is available keep kallsyms
	 * as a fallback for older kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		ev.header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	ev.mmap.header.size = (sizeof(ev.mmap) -
			(sizeof(ev.mmap.filename) - size));
	ev.mmap.pgoff = args.start;
	ev.mmap.start = map->start;
	ev.mmap.len   = map->end - ev.mmap.start;
	ev.mmap.pid   = machine->pid;

	return process(&ev, session);
}

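/*
 * Column width bookkeeping for the report output: widen the comm/thread
 * columns when a new, longer comm name is seen, unless the user fixed the
 * widths or set a field separator.
 */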
static void thread__comm_adjust(struct thread *self)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		unsigned int slen = strlen(comm);

		if (slen > comms__col_width) {
			comms__col_width = slen;
			threads__col_width = slen + 6;
		}
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self);

	return 0;
}

int event__process_comm(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.tid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process_lost(event_t *self, struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}

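/*
 * Kernel space MMAP events are handled below: module maps (filenames
 * starting with '/'), the main kernel map (filename matching the prefix
 * returned by machine__mmap_name()) and other '[...]' entries.
 */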
static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0UL;
}

static int event__process_kernel_mmap(event_t *self,
			struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
					"[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * Preload the DSOs of the guest kernel and its modules.
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

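/*
 * Entry point for PERF_RECORD_MMAP: kernel/guest-kernel events are routed
 * to event__process_kernel_mmap(), user space events create a new map in
 * the host machine's user_dsos and insert it into the owning thread's map
 * groups.
 */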
int event__process_mmap(event_t *self, struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
			self->mmap.pid, self->mmap.tid, self->mmap.start,
			self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	map = map__new(&machine->user_dsos, self->mmap.start,
			self->mmap.len, self->mmap.pgoff,
			self->mmap.pid, self->mmap.filename,
			MAP__FUNCTION, session->cwd, session->cwdlen);

	if (thread == NULL || map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

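/*
 * PERF_RECORD_FORK/PERF_RECORD_EXIT handler: EXIT is ignored, FORK links
 * the new thread to its parent so that maps and comm are inherited.
 */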
int event__process_task(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.tid);
	struct thread *parent = perf_session__findnew(session, self->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);

	if (self->header.type == PERF_RECORD_EXIT)
		return 0;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

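/*
 * Resolve 'addr' to a map, selecting the right map groups from the cpumode:
 * 'k' host kernel, '.' host user space, 'g' guest kernel, 'u' guest user
 * space (not supported yet), 'H' hypervisor.  Samples for a space the tool
 * is not interested in (per perf_host/perf_guest) are marked as filtered.
 */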
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = self->short_name_len;
		if (verbose)
			slen = self->long_name_len;
		if (dsos__col_width < slen)
			dsos__col_width = slen;
	}

	self->slen_calculated = 1;
}

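/*
 * Common preprocessing for sample events: find (or create) the thread,
 * apply the comm/dso/symbol filter lists, resolve the ip to a map and
 * symbol and, for old perf.data files that lack kernel MMAP events,
 * lazily create the host kernel maps.
 */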
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (dsos__col_width < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			dsos__col_width = unresolved_col_width;
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}

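/*
 * Decode the variable layout of a PERF_RECORD_SAMPLE body.  The fields are
 * laid out back to back in the order of the PERF_SAMPLE_* bits in 'type'
 * (the attr->sample_type the event was recorded with).  For example, for
 * type == PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME the array is:
 *
 *	{ u64 ip; u32 pid, tid; u64 time; }
 *
 * PERF_SAMPLE_READ is not handled here yet.
 */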
int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
{
	u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}