// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

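/*
 * Choose the file name the kernel MMAP event is expected to carry for this
 * machine: the configured vmlinux/kallsyms name for the host, the default
 * guest name, or a per-pid "[guest.kernel.kallsyms.<pid>]" name.
 */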
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			machine->mmap_name = strdup(symbol_conf.vmlinux_name);
		else
			machine->mmap_name = strdup("[kernel.kallsyms]");
	} else if (machine__is_default_guest(machine)) {
		if (symbol_conf.default_guest_vmlinux_name)
			machine->mmap_name = strdup(symbol_conf.default_guest_vmlinux_name);
		else
			machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	} else {
		if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			 machine->pid) < 0)
			machine->mmap_name = NULL;
	}

	return machine->mmap_name ? 0 : -ENOMEM;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

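/*
 * Unlink every DSO from both the list and the rbtree, dropping the
 * reference the dsos container held on each of them.
 */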
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

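/*
 * Find the machine with the given pid. A machine with pid 0 acts as the
 * default guest and is returned when no exact match exists.
 */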
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

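/*
 * Like machines__find(), but adds the machine if it is missing, using
 * <guestmount>/<pid> as its root directory when guest mounts are set up.
 */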
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

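/*
 * A thread may be created from a sample that carries only a tid; once the
 * real pid becomes known, record it and share the thread group leader's
 * map groups with this thread.
 */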
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * The caller must eventually drop the thread->refcnt returned with a
 * successful lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads->last_match = th;
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

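/*
 * An offline module's dso gets a bracketed placeholder as its long_name;
 * when a real path for it shows up, swap the path in so dso__load() can
 * find the file later.
 */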
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

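/*
 * Find the map for the kernel module 'filename', creating it (and its
 * backing dso) at address 'start' if it is not there yet.
 */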
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* Put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

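/*
 * Create one kernel map per map type for 'kernel', using identity address
 * translation, and hook them into the machine's kmaps.
 */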
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;

	/* In case of renewing the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(0, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal the kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				const char *name __maybe_unused)
{
	return 0;
}

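/* modules__parse() callback: create the map for one kernel module. */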
static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = start;
		machine->vmlinux_maps[i]->end   = end;

		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (start == 0 && end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}
		machine__set_kernel_mmap(machine, addr, 0);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);
	return 0;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

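/*
 * A kernel MMAP event either describes a module (the filename is a path or
 * a bracketed module name) or the kernel proper, recognized by matching the
 * filename against machine->mmap_name; the latter (re)creates the kernel maps.
 */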
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need passing correct
			 * cpumode.  We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			0, 0, 0, 0, 0, 0,
			event->mmap.filename,
			type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

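/*
 * srcline resolution is expensive, so results are cached in the dso's
 * srclines tree, keyed by ip.
 */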
static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

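/*
 * Resolve one callchain entry to a map/symbol and append it to the cursor.
 * PERF_CONTEXT_* markers are not appended; they only switch the cpumode
 * used for the entries that follow them.
 */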
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
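/*
 * A small hash keyed on the branch source address spots candidate repeats;
 * confirmed repeats are collapsed, with their iteration counts and cycles
 * accumulated into 'iter'. Hash collisions are not handled, so some loops
 * may be missed.
 */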
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
						l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success, LBR callchain information found
 * 0 when no LBR callchain information is available; the caller should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

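	/*
	 * Prefer the LBR call stack when the event recorded one; any
	 * outcome other than "no LBR data" (0) finishes the resolution.
	 */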
	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is offset from the
				 * branch source by the length of the
				 * call instruction. To adjust for this,
				 * assume the calling instruction is no
				 * longer than 8 bytes.
				 */
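				/*
				 * E.g. (hypothetical addresses): a 5-byte
				 * call at 0x1000 pushes the return address
				 * 0x1005; an LBR "from" of 0x1000 falls in
				 * [0x1005 - 8, 0x1005), so the duplicated
				 * chain entry is stepped over via
				 * first_call++.
				 */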
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);
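		/*
		 * iter[i] now holds the iteration count of any loop that
		 * remove_loops() collapsed at be[i]; it is passed to
		 * add_callchain_ip() below so the repeat count is kept.
		 */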

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
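		/*
		 * Context markers (ip >= PERF_CONTEXT_MAX) only switch
		 * cpumode and do not count towards max_stack.
		 */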

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int append_inlines(struct callchain_cursor *cursor,
			  struct map *map, struct symbol *sym, u64 ip)
{
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
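	/* A non-zero return tells the caller to append the symbol itself. */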
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__rip_2objdump(map, ip);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		ret = callchain_cursor_append(cursor, ip, map,
					      ilist->symbol, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;

	if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
		return 0;

	srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do DWARF post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);
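	/*
	 * Append order matters: the cursor is filled innermost-first in
	 * callee order, so the sampled chain (kernel frames first) must
	 * be resolved before the DWARF unwind; caller order reverses
	 * the two steps.
	 */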

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

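	/* Visit live threads (rb-tree) and dead threads (list) per bucket. */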
	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads,
							 process, machine,
							 data_mmap,
							 proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      proc_map_timeout,
						      nr_threads_synthesize);
	/* command specified, no pre-existing threads to synthesize */
	return 0;
}
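/*
 * Usage sketch (hypothetical caller, not part of this file): a tool
 * synthesizing pre-existing threads for a CPU-wide session might do
 * roughly
 *
 *	err = __machine__synthesize_threads(machine, NULL, target, threads,
 *					    perf_event__process, false,
 *					    500, 1);
 *
 * assuming perf_event__process as the handler, no mmap data events, a
 * 500 ms /proc map timeout and a single synthesizing thread.
 */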

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
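		/* -1 marks CPUs whose current tid is not (yet) known. */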
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32.  In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		if (!err)
			machine->kernel_start = map->start;
	}
	return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps,
						     MAP__FUNCTION,
						     *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}