// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return machine->vmlinux_map->dso;
}

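/*
 * Note: each machine keeps its DSOs both on a linked list (dsos->head) and
 * in an rbtree (dsos->root) used for name lookups, guarded by dsos->lock;
 * both indexes set up here are torn down together in dsos__exit() below.
 */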
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

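/*
 * Threads are kept in THREADS__TABLE_SIZE hash buckets (see
 * machine__threads()), each with its own rbtree, rwsem and last_match
 * cache, which spreads lock contention when many threads are tracked.
 */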
static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

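/*
 * mmap_name is the pseudo file name that kernel MMAP events carry for the
 * kernel mapping, e.g. "[kernel.kallsyms]"; guests get a pid-suffixed
 * variant. It is matched against event->mmap.filename when processing
 * kernel mmap events further below.
 */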
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	maps__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	/* Report the error so callers can tell initialization failed. */
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

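/*
 * Illustrative lifecycle of the constructors above (a sketch only, not code
 * from this file):
 *
 *	struct machine *machine = machine__new_kallsyms();
 *
 *	if (machine != NULL) {
 *		...process events / resolve symbols...
 *		machine__delete(machine);
 *	}
 */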
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead: at this point whatever threads were
		 * left on the dead list better have a reference count taken
		 * by whoever is using them, and then, when they drop those
		 * references and the count finally hits zero, thread__put()
		 * will see that the thread is not on the dead threads list
		 * and will not try to remove it from there, just calling
		 * thread__delete() straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->maps)
		leader->maps = maps__new(machine);

	if (!leader->maps)
		goto out_err;

	if (th->maps == leader->maps)
		goto out_put; /* don't leak the reference on the leader */

	if (th->maps) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!maps__empty(th->maps))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		maps__put(th->maps);
	}

	th->maps = maps__get(leader->maps);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 *
 * The last_match cache is only used when perf runs single threaded
 * (perf_singlethreaded), as it is racy otherwise.
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize maps separately after the rb tree is
		 * updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader and that would
		 * screw up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

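/*
 * The wrappers below differ only in locking: machine__findnew_thread()
 * takes the bucket lock for writing since it may insert a new thread, while
 * machine__find_thread() only needs it for reading;
 * __machine__findnew_thread() assumes the caller already holds the lock.
 */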
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_cgroup_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused)
{
	struct cgroup *cgrp;

	if (dump_trace)
		perf_event__fprintf_cgroup(event, stdout);

	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
	if (cgrp == NULL)
		return -ENOMEM;

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
		dso->kernel = DSO_TYPE_KERNEL;
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static int is_bpf_image(const char *name)
{
	return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
	       strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
}

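/*
 * PERF_RECORD_KSYMBOL registers symbols that appear at runtime, such as
 * JITed BPF programs and BPF trampolines/dispatchers; when no kernel map
 * covers the reported address yet, a small dso/map pair is created on the
 * fly so the symbol can be resolved later.
 */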
static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);

	if (!map) {
		struct dso *dso = dso__new(event->ksymbol.name);

		if (dso) {
			dso->kernel = DSO_TYPE_KERNEL;
			map = map__new2(0, dso);
		}

		if (!dso || !map) {
			dso__put(dso);
			return -ENOMEM;
		}

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		maps__insert(&machine->kmaps, map);
		dso__set_loaded(dso);

		if (is_bpf_image(event->ksymbol.name)) {
			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
			dso__set_long_name(dso, "", false);
		}
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = maps__find(&machine->kmaps, event->ksymbol.addr);
	if (map)
		maps__remove(&machine->kmaps, map);

	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

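/*
 * PERF_RECORD_TEXT_POKE describes a live modification of kernel text (e.g.
 * ftrace or static-key patching); the new bytes are written into the dso
 * data cache so later decoding sees the patched instructions.
 */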
int machine__process_text_poke(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (dump_trace)
		perf_event__fprintf_text_poke(event, stdout);

	if (!event->text_poke.new_len)
		return 0;

	if (cpumode != PERF_RECORD_MISC_KERNEL) {
		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
		return 0;
	}

	if (map && map->dso) {
		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
		int ret;

		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		map__load(map);
		ret = dso__data_write_cache_addr(map->dso, map, machine,
						 event->text_poke.addr,
						 new_bytes,
						 event->text_poke.new_len);
		if (ret != event->text_poke.new_len)
			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
				 event->text_poke.addr);
	} else {
		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
			 event->text_poke.addr);
	}

	return 0;
}

static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	maps__insert(&machine->kmaps, map);

	/* Put the map here because maps__insert already got it */
	map__put(map);
out:
	/* Put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		/* Accumulate across buckets instead of overwriting ret. */
		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end   = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	maps__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct maps *kmaps = &machine->kmaps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_entry(kmaps, map) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = maps__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* In case the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	maps__insert(&machine->kmaps, machine->vmlinux_map);
	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		maps__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}

static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				u64 *size __maybe_unused,
				const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

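/*
 * The kernel map cannot simply be resized in place: maps are indexed by
 * address range, so machine__update_kernel_mmap() below removes the map,
 * updates its range and re-inserts it.
 */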
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end   = end;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

static void machine__update_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	maps__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	maps__insert(&machine->kmaps, map);
	map__put(map);
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so re-order the kmaps;
		 * assume it's the last in the kmaps.
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct dso *kernel = machine__kernel_dso(machine);
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}

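/*
 * A kernel-space MMAP event can describe a module (an absolute path or a
 * "[...]" name other than mmap_name), the main kernel mapping (filename
 * matching machine->mmap_name), or an extra map such as an x86_64 entry
 * trampoline; the function below dispatches between those cases.
 */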
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__addnew_module_map(machine, event->mmap.start,
						 event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode.  We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			&dso_id, event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			NULL, prot, 0, event->mmap.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference:
	 * if this is the last reference, the thread__delete destructor will
	 * be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done.  If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events that
	 * get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_CGROUP:
		ret = machine__process_cgroup_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf(machine, event, sample); break;
	case PERF_RECORD_TEXT_POKE:
		ret = machine__process_text_poke(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = phys_addr;
}

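/*
 * Resolve the sampled instruction address and data address into a newly
 * allocated mem_info. The object is reference counted; callers are
 * expected to drop their reference (mem_info__put()) when done.
 */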
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

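/*
 * Look up the source line for ip, caching the result in the DSO's srcline
 * tree. Skipped when callchains are keyed by function (CCKEY_FUNCTION),
 * where source lines are not needed.
 */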
static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
	struct map *map = ms->map;
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      ms->sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

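/*
 * Per-branch-entry loop information produced by remove_loops(): how many
 * loop iterations were collapsed at this position and the cycles they
 * accumulated.
 */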
struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

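/*
 * Resolve one callchain ip to a map/symbol and append it to the cursor.
 * PERF_CONTEXT_* markers do not produce an entry; they update *cpumode
 * for the addresses that follow. An unknown context resets the cursor
 * and returns 1 so that the rest of the corrupted chain is discarded.
 */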
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct map_symbol ms;
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	ms.maps = al.maps;
	ms.map = al.map;
	ms.sym = al.sym;
	srcline = callchain_srcline(&ms, al.addr);
	return callchain_cursor_append(cursor, ip, &ms,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

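/*
 * Resolve both ends of every entry in the sample's branch stack. Returns
 * a calloc'ed array of bs->nr branch_info entries owned by the caller.
 */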
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
		bi[i].flags = entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
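/*
 * Illustrative example: for a captured "from" sequence
 *
 *	A B C A B C D
 *
 * the second A collides with the first one in the hash, the run "A B C"
 * is verified to repeat, the duplicated run is dropped (nr shrinks by
 * off) and the iteration count plus its cycles are recorded via
 * save_iterations(), leaving "A B C D".
 */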
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
						l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}

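/*
 * Append the kernel part of the sampled callchain (chain->ips[0..end]).
 * The LBR covers only the user part of the chain, so kernel entries still
 * come from the regular callchain; "callee" selects the traversal order.
 */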
static int lbr_callchain_add_kernel_ip(struct thread *thread,
				       struct callchain_cursor *cursor,
				       struct perf_sample *sample,
				       struct symbol **parent,
				       struct addr_location *root_al,
				       u64 branch_from,
				       bool callee, int end)
{
	struct ip_callchain *chain = sample->callchain;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int err, i;

	if (callee) {
		for (i = 0; i < end + 1; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, chain->ips[i],
					       false, NULL, NULL, branch_from);
			if (err)
				return err;
		}
		return 0;
	}

	for (i = end; i >= 0; i--) {
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, chain->ips[i],
				       false, NULL, NULL, branch_from);
		if (err)
			return err;
	}

	return 0;
}

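/*
 * Remember the cursor node just appended for LBR entry idx in
 * lbr_stitch->prev_lbr_cursor[idx], so that it can be re-appended
 * ("stitched") if the next sample's LBR stack overlaps this one. The slot
 * is marked invalid when the cursor has no node for it.
 */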
static void save_lbr_cursor_node(struct thread *thread,
				 struct callchain_cursor *cursor,
				 int idx)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;

	if (!lbr_stitch)
		return;

	if (cursor->pos == cursor->nr) {
		lbr_stitch->prev_lbr_cursor[idx].valid = false;
		return;
	}

	if (!cursor->curr)
		cursor->curr = cursor->first;
	else
		cursor->curr = cursor->curr->next;
	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
	       sizeof(struct callchain_cursor_node));

	lbr_stitch->prev_lbr_cursor[idx].valid = true;
	cursor->pos++;
}

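/*
 * Append the LBR entries themselves: entries[0].to is the sampled ip,
 * then each entries[i].from is one caller; caller order walks the same
 * data in reverse. Every appended node is also saved with
 * save_lbr_cursor_node() for possible stitching with the next sample.
 */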
static int lbr_callchain_add_lbr_ip(struct thread *thread,
				    struct callchain_cursor *cursor,
				    struct perf_sample *sample,
				    struct symbol **parent,
				    struct addr_location *root_al,
				    u64 *branch_from,
				    bool callee)
{
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int lbr_nr = lbr_stack->nr;
	struct branch_flags *flags;
	int err, i;
	u64 ip;

	/*
	 * curr and pos are not used while writing the cursor; they are
	 * cleared in callchain_cursor_commit() when the writing session is
	 * closed. Reuse them here to track the current cursor node.
	 */
	if (thread->lbr_stitch) {
		cursor->curr = NULL;
		cursor->pos = cursor->nr;
		if (cursor->nr) {
			cursor->curr = cursor->first;
			for (i = 0; i < (int)(cursor->nr - 1); i++)
				cursor->curr = cursor->curr->next;
		}
	}

	if (callee) {
		/* Add LBR ip from first entries.to */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from);
		if (err)
			return err;

		/*
		 * The number of cursor nodes has increased, so advance the
		 * current cursor node. There is no need to save the node for
		 * entry 0: it's impossible to stitch the whole LBRs of the
		 * previous sample.
		 */
		if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
			if (!cursor->curr)
				cursor->curr = cursor->first;
			else
				cursor->curr = cursor->curr->next;
			cursor->pos++;
		}

		/* Add LBR ip from entries.from one by one. */
		for (i = 0; i < lbr_nr; i++) {
			ip = entries[i].from;
			flags = &entries[i].flags;
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       true, flags, NULL,
					       *branch_from);
			if (err)
				return err;
			save_lbr_cursor_node(thread, cursor, i);
		}
		return 0;
	}

	/* Add LBR ip from entries.from one by one. */
	for (i = lbr_nr - 1; i >= 0; i--) {
		ip = entries[i].from;
		flags = &entries[i].flags;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from);
		if (err)
			return err;
		save_lbr_cursor_node(thread, cursor, i);
	}

	/* Add LBR ip from first entries.to */
	ip = entries[0].to;
	flags = &entries[0].flags;
	*branch_from = entries[0].from;
	err = add_callchain_ip(thread, cursor, parent,
			       root_al, &cpumode, ip,
			       true, flags, NULL,
			       *branch_from);
	if (err)
		return err;

	return 0;
}

static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
					     struct callchain_cursor *cursor)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct callchain_cursor_node *cnode;
	struct stitch_list *stitch_node;
	int err;

	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
		cnode = &stitch_node->cursor;

		err = callchain_cursor_append(cursor, cnode->ip,
					      &cnode->ms,
					      cnode->branch,
					      &cnode->branch_flags,
					      cnode->nr_loop_iter,
					      cnode->iter_cycles,
					      cnode->branch_from,
					      cnode->srcline);
		if (err)
			return err;
	}
	return 0;
}

static struct stitch_list *get_stitch_node(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct stitch_list *stitch_node;

	if (!list_empty(&lbr_stitch->free_lists)) {
		stitch_node = list_first_entry(&lbr_stitch->free_lists,
					       struct stitch_list, node);
		list_del(&stitch_node->node);

		return stitch_node;
	}

	return malloc(sizeof(struct stitch_list));
}

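/*
 * LBR stitching: the hardware index (hw_idx) tells where each sample's
 * LBR stack sits in the physical LBR ring. If the older part of the
 * previous sample's stack lines up with the base of the current one, the
 * saved entries beyond that overlap can be appended ("stitched") to
 * extend the current callchain past the LBR depth.
 */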
static bool has_stitched_lbr(struct thread *thread,
			     struct perf_sample *cur,
			     struct perf_sample *prev,
			     unsigned int max_lbr,
			     bool callee)
{
	struct branch_stack *cur_stack = cur->branch_stack;
	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
	struct branch_stack *prev_stack = prev->branch_stack;
	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	int i, j, nr_identical_branches = 0;
	struct stitch_list *stitch_node;
	u64 cur_base, distance;

	if (!cur_stack || !prev_stack)
		return false;

	/* Find the physical index of the base-of-stack for current sample. */
	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;

	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
						     (max_lbr + prev_stack->hw_idx - cur_base);
	/* Previous sample has shorter stack. Nothing can be stitched. */
	if (distance + 1 > prev_stack->nr)
		return false;

	/*
	 * Check if there are identical LBRs between two samples.
	 * Identical LBRs must have the same from, to and flags values. Also,
	 * they have to be saved in the same LBR registers (same physical
	 * index).
	 *
	 * Starts from the base-of-stack of current sample.
	 */
	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
		if ((prev_entries[i].from != cur_entries[j].from) ||
		    (prev_entries[i].to != cur_entries[j].to) ||
		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
			break;
		nr_identical_branches++;
	}

	if (!nr_identical_branches)
		return false;

	/*
	 * Save the LBRs between the base-of-stack of previous sample
	 * and the base-of-stack of current sample into lbr_stitch->lists.
	 * These LBRs will be stitched later.
	 */
	for (i = prev_stack->nr - 1; i > (int)distance; i--) {

		if (!lbr_stitch->prev_lbr_cursor[i].valid)
			continue;

		stitch_node = get_stitch_node(thread);
		if (!stitch_node)
			return false;

		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
		       sizeof(struct callchain_cursor_node));

		if (callee)
			list_add(&stitch_node->node, &lbr_stitch->lists);
		else
			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
	}

	return true;
}

static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
	if (thread->lbr_stitch)
		return true;

	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
	if (!thread->lbr_stitch)
		goto err;

	thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
	if (!thread->lbr_stitch->prev_lbr_cursor)
		goto free_lbr_stitch;

	INIT_LIST_HEAD(&thread->lbr_stitch->lists);
	INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);

	return true;

free_lbr_stitch:
	zfree(&thread->lbr_stitch);
err:
	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
	thread->lbr_stitch_enable = false;
	return false;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack,
					unsigned int max_lbr)
{
	bool callee = (callchain_param.order == ORDER_CALLEE);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	struct lbr_stitch *lbr_stitch;
	bool stitched_lbr = false;
	u64 branch_from = 0;
	int err;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i == chain_nr)
		return 0;

	if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
		lbr_stitch = thread->lbr_stitch;

		stitched_lbr = has_stitched_lbr(thread, sample,
						&lbr_stitch->prev_sample,
						max_lbr, callee);

		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
			list_replace_init(&lbr_stitch->lists,
					  &lbr_stitch->free_lists);
		}
		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
	}

	if (callee) {
		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  true, i);
		if (err)
			goto error;

		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, true);
		if (err)
			goto error;

		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}

	} else {
		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, false);
		if (err)
			goto error;

		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  false, i);
		if (err)
			goto error;
	}
	return 1;

error:
	return (err < 0) ? err : 0;
}

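/*
 * Scan backwards from ent for the closest preceding PERF_CONTEXT_*
 * marker and feed it through add_callchain_ip(), which for markers only
 * updates *cpumode. Needed when walking the chain in caller order.
 */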
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0);
			break;
		}
	}
	return err;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (evsel__has_branch_callstack(evsel)) {
		struct perf_env *env = evsel__env(evsel);

		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack,
						   !env ? 0 : env->max_branches);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

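/*
 * Expand ip into its inlined call frames, caching the parsed inline tree
 * per DSO, and append each frame as its own cursor entry. Returns 0 only
 * when inline frames were appended; callers then skip appending the raw
 * ip itself.
 */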
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		struct map_symbol ilist_ms = {
			.maps = ms->maps,
			.map = map,
			.sym = ilist->symbol,
		};
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

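/*
 * Resolve the full callchain for a sample: the sampled/LBR chain and the
 * DWARF post-unwind are combined in the order dictated by
 * callchain_param.order, so that cursor entries always come out
 * callee-first or caller-first as requested.
 */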
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

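/*
 * Walk every thread on the machine, both the live threads in each hash
 * bucket's rbtree and the threads on the dead list, calling fn until it
 * returns non-zero. A minimal caller sketch (hypothetical callback):
 *
 *	static int count_thread(struct thread *t __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */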
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

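/*
 * Track which tid was last seen running on each CPU. The per-CPU array
 * is allocated lazily and sized from the recorded perf_env's online CPU
 * count, capped at MAX_NR_CPUS.
 */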
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);

	if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;
	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < nr_cpus; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= nr_cpus) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
 * normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32.  In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map->start;
	}
	return err;
}

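/*
 * When guest and host share a single address space, the recorded cpumode
 * may not match the address itself; reclassify user/kernel (and guest
 * user/kernel) according to whether addr is a kernel address.
 */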
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}

struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
{
	return dsos__findnew_id(&machine->dsos, filename, id);
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return machine__findnew_dso_id(machine, filename, NULL);
}

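/*
 * Resolve a kernel address to its symbol name, in the shape expected by
 * an address->name resolver callback (e.g. for libtraceevent): *addrp is
 * rewritten to the symbol start address and *modp is set to the module
 * short name for module maps, or NULL otherwise.
 */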
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}