#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}

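/*
 * Set up an empty machine: kmaps, DSO lists, the thread rbtree and its
 * rwlock. A guest machine (pid != HOST_KERNEL_ID) also gets a
 * placeholder "[guest/<pid>]" thread so its samples resolve to
 * something.
 */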
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
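
/*
 * A minimal usage sketch (error handling elided; 'event' and 'sample'
 * are assumed to be a decoded perf event and its parsed sample):
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		machine__process_event(machine, event, &sample);
 *		machine__delete(machine);
 *	}
 */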

static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

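/*
 * Called when a thread's pid finally becomes known (e.g. from an MMAP
 * or COMM event): record it and, if the thread turns out not to be a
 * group leader, share the leader's map_groups with it.
 */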
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

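/*
 * The caller must hold machine->threads_lock; the rbtree walk and
 * insertion here are unlocked. Use the machine__findnew_thread() /
 * machine__find_thread() wrappers below, which take the lock.
 */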
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = thread__get(__machine__findnew_thread(machine, pid, tid));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;
	pthread_rwlock_rdlock(&machine->threads_lock);
	th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

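/*
 * Find or create the DSO for a kernel module. Newly created entries get
 * a host or guest KMODULE symtab_type, bumped to the matching _COMP
 * variant when the module file is compressed.
 */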
static struct dso*
machine__module_dso(struct machine *machine, struct kmod_path *m,
		    const char *filename)
{
	struct dso *dso;

	dso = dsos__find(&machine->kernel_dsos, m->name, true);
	if (!dso) {
		dso = dsos__addnew(&machine->kernel_dsos, m->name);
		if (dso == NULL)
			return NULL;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map = NULL;
	struct dso *dso;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map)
		goto out;

	dso = machine__module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

out:
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * Full name could reveal us kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

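/*
 * Handle an MMAP event in kernel space: either a module (a path, or a
 * bracketed name that does not match the kernel mmap prefix) or the
 * kernel image itself, whose maps are then (re)created and sized from
 * the event.
 */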
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.pid, event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			event->mmap.pid, 0, 0, 0, 0, 0, 0,
			event->mmap.filename,
			type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

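/*
 * Detach a thread from the rbtree and park it on dead_threads; the
 * final thread__put() may then free it. 'lock' is false when the
 * caller already holds threads_lock.
 */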
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(atomic_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event);
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

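/*
 * Resolve one callchain address and append it to callchain_cursor.
 * PERF_CONTEXT_* markers are not appended; they only switch the cpumode
 * used for the addresses that follow. Returns non-zero when the walk
 * should stop (corrupt context or cursor error).
 */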
static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

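/*
 * Resolve every from/to address in the sample's branch stack to
 * map/symbol pairs; the caller owns (and must free) the returned array.
 */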
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

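/*
 * Strip repeated cycles from a branch stack so a tight loop does not
 * flood the callchain: a small hash keyed on each entry's 'from'
 * address finds candidate repeats, which are verified entry by entry
 * and memmove()d out. Returns the reduced length.
 */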
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

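/*
 * Resolve a sample's callchain into callchain_cursor: try the LBR call
 * stack first (when the evsel recorded one), optionally fold sampled
 * branch entries into the stack, then walk the regular ip_callchain
 * entries.
 */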
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);

}

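/*
 * Iterate over all threads, live (rbtree) and dead, stopping early when
 * 'fn' returns non-zero.
 */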
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

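/*
 * Record which tid is currently running on 'cpu', allocating the
 * current_tid array (sized MAX_NR_CPUS, initialized to -1) on first
 * use, and tag the corresponding thread with that cpu.
 */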
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32.  In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}