#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	pthread_rwlock_init(&dsos->lock, NULL);
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	machine->nr_threads = 0;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

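/*
 * Note: produces "[kernel.kallsyms]" for the host machine,
 * "[guest.kernel.kallsyms]" for the default guest, and
 * "[guest.kernel.kallsyms.<pid>]" for a specific guest.
 */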
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * The caller must eventually drop the thread->refcnt reference returned
 * by a successful lookup or a newly inserted thread.
 */
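/*
 * Typical usage, as elsewhere in this file (sketch):
 *
 *	struct thread *th = machine__findnew_thread(machine, pid, tid);
 *	if (th != NULL) {
 *		... use th ...
 *		thread__put(th);
 *	}
 */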
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
		++machine->nr_threads;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = __machine__findnew_thread(machine, pid, tid);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = ____machine__findnew_thread(machine, pid, tid, false);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

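/*
 * An offline module's dso may carry a placeholder long_name such as
 * "[some_module]" (hypothetical example); once an event supplies a real
 * path (one containing '/'), adopt it so dso__load() can find the file.
 */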
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = DSO__NAME_KALLSYMS;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = 0;

	if (machine__get_running_kernel_start(machine, NULL, &start))
		return -1;

	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	return __machine__load_kallsyms(machine, filename, type, false);
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

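/*
 * Parses "<root_dir>/proc/version"; e.g. a first line of
 * "Linux version 4.13.0 (gcc ...)" (illustrative) yields "4.13.0".
 */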
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name can reveal kmod compression, so
	 * update the symtab_type if needed.
	 */
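	/*
	 * The increment below assumes each *_COMP symtab type directly
	 * follows its uncompressed counterpart in the enum, e.g.
	 * DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ->
	 * DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP.
	 */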
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data files came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		pthread_rwlock_rdlock(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However, we don't really need to pass the correct
			 * cpumode.  We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		pthread_rwlock_unlock(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			0, 0, 0, 0, 0, 0,
			event->mmap.filename,
			type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	--machine->nr_threads;
	/*
	 * Move it first to the dead_threads list, then drop the reference;
	 * if this is the last reference, the thread__delete destructor will
	 * be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

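/* Loop-iteration info that remove_loops() folds out of a branch stack. */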
struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
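/*
 * Illustrative example (hypothetical "from" addresses): for the sequence
 * A B C B C D, the repeated B C body is detected via a small hash of
 * "from" addresses, one repetition is dropped (leaving A B C D), and the
 * folded body's length and cycle counts are recorded in iter[].
 */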
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
						l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success, when LBR callchain information is available
 * 0 when no LBR callchain information is available, so the caller should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * The LBR callstack can only capture the user call chain.
		 * mix_chain_nr is the kernel call chain number plus the
		 * LBR user call chain number:
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf.
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;
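		/*
		 * Worked example with hypothetical values: for
		 * chain->ips = { PERF_CONTEXT_KERNEL, k1, k2, PERF_CONTEXT_USER, ... }
		 * the loop above stops at i = 3, so with lbr_nr = 4 the
		 * cursor receives mix_chain_nr = 3 + 1 + 4 + 1 = 9 entries.
		 */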

		for (j = 0; j < mix_chain_nr; j++) {
			int err;

			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

1962 1963 1964 1965
	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
1966
	skip_idx = arch_skip_callchain_idx(thread, chain);
1967

1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982
	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this,
				 * assume the calling instruction is no
				 * longer than 8 bytes.
				 */
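				/*
				 * E.g. (hypothetical addresses): with a
				 * saved return address of 0x1006, a branch
				 * whose from address is 0x1000 lies within
				 * [0x0ffe, 0x1006) and is treated as the
				 * call that produced that return address,
				 * so the duplicated entry is stepped over.
				 */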
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}
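		/*
		 * remove_loops() folds repeated loop iterations detected in
		 * the branch stack; iter[] records what was folded into each
		 * surviving entry.
		 */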

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
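			/*
			 * Each branch entry contributes two cursor entries:
			 * the branch target first, then the branch source.
			 */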
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
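	/*
	 * Walk the kernel-supplied callchain, skipping any leading entries
	 * already covered by the branch stack above (first_call) and
	 * stopping once max_stack entries have been resolved.
	 */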
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
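	/*
	 * Callback for unwind__get_entries(): append each DWARF-unwound
	 * frame to the callchain cursor.
	 */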
	struct callchain_cursor *cursor = arg;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do DWARF post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(&callchain_cursor);
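	/*
	 * The cursor is filled in frame order, so the kernel-provided
	 * sample callchain and the DWARF post-unwind entries have to be
	 * resolved in opposite order for callee vs. caller sorting.
	 */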

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
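	/*
	 * Visit the live threads in the rb-tree first, then the threads
	 * parked on the dead_threads list.
	 */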

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout)
{
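	/*
	 * For an existing task or a CPU-wide target the threads have to be
	 * synthesized from /proc; a workload started by perf itself will
	 * report its threads via fork/comm/mmap events, so there is nothing
	 * to do for it here.
	 */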
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads,
							 process, machine,
							 data_mmap,
							 proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine,
						      data_mmap,
						      proc_map_timeout);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
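		/*
		 * First use: lazily allocate the per-CPU tid table and mark
		 * every CPU as having no known tid (-1).
		 */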
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32.  In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		if (!err)
			machine->kernel_start = map->start;
	}
	return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
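	/*
	 * Resolve a kernel text address to its symbol: returns the symbol
	 * name, rewrites *addrp to the (unmapped) symbol start address and
	 * sets *modp to the module's short name, or NULL if the address is
	 * not inside a module.
	 */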
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}