symbol.c 53.3 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4 5 6
#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
7
#include <linux/capability.h>
8
#include <linux/kernel.h>
9
#include <linux/mman.h>
10
#include <linux/string.h>
A
Andi Kleen 已提交
11
#include <linux/time64.h>
12 13 14 15 16
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
17
#include <inttypes.h>
18
#include "annotate.h"
19
#include "build-id.h"
20
#include "cap.h"
21
#include "dso.h"
22
#include "util.h"
23
#include "debug.h"
24
#include "event.h"
25
#include "machine.h"
26
#include "map.h"
27
#include "symbol.h"
28
#include "strlist.h"
29
#include "intlist.h"
30
#include "namespaces.h"
31
#include "header.h"
32
#include "path.h"
33
#include <linux/ctype.h>
34
#include <linux/zalloc.h>
35 36

#include <elf.h>
37
#include <limits.h>
38
#include <symbol/kallsyms.h>
39
#include <sys/utsname.h>
P
Peter Zijlstra 已提交
40

41 42
/* Forward declarations for loaders defined later in this file. */
static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

/*
 * Candidate vmlinux image paths; populated elsewhere (presumably by
 * vmlinux_path__init() — not visible in this chunk).
 */
int vmlinux_path__nr_entries;
char **vmlinux_path;
47

48
/*
 * Global symbol-handling configuration.  Fields not listed here are
 * zero-initialized; command-line option parsing may override these defaults.
 */
struct symbol_conf symbol_conf = {
	.nanosecs		= false,
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.demangle		= true,
	.demangle_kernel	= false,
	.cumulate_callchain	= true,
	.time_quantum		= 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers	= true,
	.symfs			= "",
	.event_group		= true,
	.inline_name		= true,
	.res_sample		= 0,
};

63 64 65 66 67 68
/*
 * Probe order for locating a symbol table for a DSO; tried first to last
 * until one succeeds.  Must end with DSO_BINARY_TYPE__NOT_FOUND.
 */
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
83

84
/*
 * Keep only kallsyms entries we care about: text ('T'/'W', including the
 * lowercase local variants) and data ('D'/'B') symbols.
 */
static bool symbol_type__filter(char symbol_type)
{
	switch (toupper(symbol_type)) {
	case 'T':
	case 'W':
	case 'D':
	case 'B':
		return true;
	default:
		return false;
	}
}

90 91 92 93 94 95 96 97 98 99
/* Count the leading '_' characters in @str. */
static int prefix_underscores_count(const char *str)
{
	int count = 0;

	while (str[count] == '_')
		count++;

	return count;
}

100 101 102 103 104
/*
 * Default (weak) policy for fixing up a zero-length symbol @p: make it end
 * where its successor @c starts.  Architectures may override this.
 */
void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
{
	p->end = c->start;
}

105 106 107 108 109
/* Default (weak) hook: no arch-specific symbol name normalization. */
const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}

110 111 112 113 114 115 116 117 118 119 120
/* Default (weak) symbol name comparison: plain strcmp() semantics. */
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

/* Default (weak) bounded symbol name comparison: strncmp() semantics. */
int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}

121 122 123 124 125 126 127 128 129 130 131
/*
 * Default (weak) tie-breaker between two aliased symbols: prefer the one
 * that is not a "SyS"/"compat_SyS" syscall alias.
 */
int __weak arch__choose_best_symbol(struct symbol *syma,
				    struct symbol *symb __maybe_unused)
{
	const char *name = syma->name;

	/*
	 * Avoid "SyS" kernel syscall aliases.  strncmp() returning zero
	 * already implies the name is at least as long as the prefix.
	 */
	if (strncmp(name, "SyS", 3) == 0)
		return SYMBOL_B;
	if (strncmp(name, "compat_SyS", 10) == 0)
		return SYMBOL_B;

	return SYMBOL_A;
}
132 133 134 135 136

/*
 * Pick which of two symbols at the same address to keep, applying a series
 * of preferences in order: non-zero length, non-weak binding, global
 * binding, fewer leading underscores, longer name, then the arch hook.
 */
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 len_a = syma->end - syma->start;
	s64 len_b = symb->end - symb->start;
	bool weak_a, weak_b, global_a, global_b;
	int under_a, under_b;
	size_t namelen_a, namelen_b;

	/* Prefer a symbol with non zero length */
	if (len_b == 0 && len_a > 0)
		return SYMBOL_A;
	if (len_a == 0 && len_b > 0)
		return SYMBOL_B;

	/* Prefer a non weak symbol over a weak one */
	weak_a = syma->binding == STB_WEAK;
	weak_b = symb->binding == STB_WEAK;
	if (weak_b && !weak_a)
		return SYMBOL_A;
	if (weak_a && !weak_b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	global_a = syma->binding == STB_GLOBAL;
	global_b = symb->binding == STB_GLOBAL;
	if (global_a && !global_b)
		return SYMBOL_A;
	if (global_b && !global_a)
		return SYMBOL_B;

	/* Prefer a symbol with less underscores */
	under_a = prefix_underscores_count(syma->name);
	under_b = prefix_underscores_count(symb->name);
	if (under_b > under_a)
		return SYMBOL_A;
	if (under_a > under_b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	namelen_a = strlen(syma->name);
	namelen_b = strlen(symb->name);
	if (namelen_a > namelen_b)
		return SYMBOL_A;
	if (namelen_a < namelen_b)
		return SYMBOL_B;

	/* Still tied: let the architecture decide. */
	return arch__choose_best_symbol(syma, symb);
}

182
void symbols__fixup_duplicate(struct rb_root_cached *symbols)
183 184 185 186
{
	struct rb_node *nd;
	struct symbol *curr, *next;

187 188 189
	if (symbol_conf.allow_aliases)
		return;

190
	nd = rb_first_cached(symbols);
191 192 193 194 195 196 197 198 199 200 201 202 203 204

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
205
			rb_erase_cached(&next->rb_node, symbols);
206
			symbol__delete(next);
207 208 209
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
210
			rb_erase_cached(&curr->rb_node, symbols);
211
			symbol__delete(curr);
212 213 214 215
		}
	}
}

216
/*
 * Give every zero-length symbol in the address-sorted tree an end address:
 * interior symbols are extended via the arch hook (by default up to the
 * next symbol's start), and a zero-length last symbol gets a page-rounded
 * guess since there is nothing after it.
 */
void symbols__fixup_end(struct rb_root_cached *symbols)
{
	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		/* Only touch symbols that came in with no size. */
		if (prev->end == prev->start && prev->end != curr->start)
			arch__symbols__fixup_end(prev, curr);
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}

239
/*
 * Fill in missing map end addresses: each map with no end is assumed to
 * extend up to the start of the following map; the last one is extended to
 * the top of the address space.  Takes the maps write lock.
 */
void map_groups__fixup_end(struct map_groups *mg)
{
	struct maps *maps = &mg->maps;
	struct map *next, *curr;

	down_write(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		if (!curr->end)
			curr->end = next->start;
		curr = next;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (!curr->end)
		curr->end = ~0ULL;

out_unlock:
	up_write(&maps->lock);
}

267
/*
 * Allocate a new symbol covering [start, start+len) (end == start when
 * len == 0, to be fixed up later) with the given ELF binding/type and name.
 *
 * The allocation reserves symbol_conf.priv_size bytes of per-symbol private
 * data *before* the struct symbol; the returned pointer is to the struct
 * symbol itself, so symbol__delete() must rewind by priv_size when freeing.
 * Returns NULL on allocation failure.
 */
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			/* The private area doubles as annotation state. */
			struct annotation *notes = (void *)sym;
			pthread_mutex_init(&notes->lock, NULL);
		}
		/* Skip past the private area to the symbol proper. */
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start   = start;
	sym->end     = len ? start + len : start;
	sym->type    = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

296
/*
 * Free a symbol allocated by symbol__new(): the real allocation starts
 * priv_size bytes before the struct symbol, so rewind before freeing.
 */
void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

301
void symbols__delete(struct rb_root_cached *symbols)
302 303
{
	struct symbol *pos;
304
	struct rb_node *next = rb_first_cached(symbols);
305 306 307 308

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
309
		rb_erase_cached(&pos->rb_node, symbols);
310
		symbol__delete(pos);
311 312 313
	}
}

314 315
/*
 * Insert @sym into the address-sorted rbtree @symbols.  When @kernel is
 * true, also classify the symbol as an idle-loop symbol (after stripping a
 * ppc64 function-descriptor dot prefix).  Symbols with equal start go to
 * the right, so existing entries keep priority at the same address.
 */
void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and appends a '.' to the
		 * start of every instruction address. Remove it.
		 */
		if (name[0] == '.')
			name++;
		sym->idle = symbol__is_idle(name);
	}

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}

348
/* Insert a non-kernel symbol (no idle classification) into the tree. */
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}

353
/*
 * Binary-search the address-sorted tree for the symbol containing @ip.
 * A symbol matches when start <= ip < end; the end address itself only
 * matches for zero-length symbols (start == end).  Returns NULL when no
 * symbol covers @ip or @symbols is NULL.
 */
static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end || (ip == s->end && ip != s->start))
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

376
static struct symbol *symbols__first(struct rb_root_cached *symbols)
377
{
378
	struct rb_node *n = rb_first_cached(symbols);
379 380 381 382 383 384 385

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

386
static struct symbol *symbols__last(struct rb_root_cached *symbols)
387
{
388
	struct rb_node *n = rb_last(&symbols->rb_root);
389 390 391 392 393 394 395

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

396 397 398 399 400 401 402 403 404 405
/* Return the symbol following @sym in address order, or NULL at the end. */
static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *nd = rb_next(&sym->rb_node);

	return nd ? rb_entry(nd, struct symbol, rb_node) : NULL;
}

406
/*
 * Insert @sym into the name-sorted tree @symbols.  The symbol must be
 * embedded in a struct symbol_name_rb_node (container_of is used to reach
 * the name-tree rb_node).  Duplicate names go to the right.
 */
static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;
	bool leftmost = true;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}

429 430
static void symbols__sort_by_name(struct rb_root_cached *symbols,
				  struct rb_root_cached *source)
431 432 433
{
	struct rb_node *nd;

434
	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
435
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
436
		symbols__insert_by_name(symbols, pos);
437 438 439
	}
}

440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456
/*
 * Compare symbol @name against @str.  When only default-versioned symbols
 * are wanted and @name carries a "@@" version suffix, compare at least up
 * to the suffix so "foo" matches "foo@@GLIBC_2.2.5" but "foobar" does not.
 * Returns <0/0/>0 like strcmp().
 */
int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning = NULL;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		versioning = strstr(name, "@@");

	if (versioning != NULL) {
		int len = strlen(str);
		int base = versioning - name;

		/* Use whichever span is longer: the query or the base name. */
		return arch__compare_symbol_names_n(name, str,
						    len > base ? len : base);
	}

	return arch__compare_symbol_names(name, str);
}

457
/*
 * Look up a symbol by name in the name-sorted tree.  When the match policy
 * is not DEFAULT_ONLY, rewind to the first of a run of equally-named
 * symbols so the caller always gets the leftmost match.  Returns NULL if
 * the tree is NULL or no symbol matches.
 */
static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
					    const char *name,
					    enum symbol_tag_include includes)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = symbol__match_symbol_name(s->sym.name, name, includes);

		if (cmp > 0)
			n = n->rb_left;
		else if (cmp < 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		/* return first symbol that has same name (if any) */
		for (n = rb_prev(n); n; n = rb_prev(n)) {
			struct symbol_name_rb_node *tmp;

			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
				break;

			s = tmp;
		}

	return &s->sym;
}

501 502
/* Invalidate the one-entry address-lookup cache used by dso__find_symbol(). */
void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso->last_find_result.addr   = 0;
	dso->last_find_result.symbol = NULL;
}

507
/*
 * Insert @sym into @dso's address-sorted tree, and refresh the lookup
 * cache if the cached address now falls inside the new symbol (zero-length
 * symbols count as covering their start address).
 */
void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(&dso->symbols, sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result.addr >= sym->start &&
	    (dso->last_find_result.addr < sym->end ||
	    sym->start == sym->end)) {
		dso->last_find_result.symbol = sym;
	}
}

519
/*
 * Find the symbol covering @addr in @dso, with a one-entry cache of the
 * last successful/failed lookup to speed up repeated queries.
 */
struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
	if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
		dso->last_find_result.addr   = addr;
		dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
	}

	return dso->last_find_result.symbol;
}

529 530
/* First (lowest-address) symbol of @dso, or NULL if it has none. */
struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(&dso->symbols);
}

534 535
/* Last (highest-address) symbol of @dso, or NULL if it has none. */
struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(&dso->symbols);
}

539 540 541
/* Symbol following @sym in address order, or NULL at the end. */
struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

544 545 546 547 548 549 550 551 552
/*
 * Next symbol in name-sort order.  @sym must be embedded in a
 * struct symbol_name_rb_node (i.e. come from the name-sorted tree).
 */
struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}

/*
 * Returns first symbol that matched with @name.  Tries an exact match
 * first, then falls back to matching only default-versioned ("@@") names.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
{
	struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
						 SYMBOL_TAG_INCLUDE__NONE);
	if (!s)
		s = symbols__find_by_name(&dso->symbol_names, name,
					  SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
	return s;
}

565
void dso__sort_by_name(struct dso *dso)
566
{
567 568
	dso__set_sorted_by_name(dso);
	return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
569 570
}

571 572
/*
 * Parse a /proc/modules-style file and invoke @process_module for every
 * module line with the bracketed module name ("[name]"), its load address
 * (the hex field after the last 'x') and its size.  Malformed lines are
 * skipped.  Returns 0 on success, -1 on I/O error, or the first non-zero
 * value returned by @process_module.
 */
int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		/* The load address is the hex field after the last 'x'. */
		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		/* The module name is the first whitespace-delimited field. */
		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		/* Second field: module size in bytes. */
		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

633 634 635 636
/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 *
 * Returns true when @name is one of the known kernel idle-loop entry
 * points, so samples landing there can be flagged as idle time.
 */
static bool symbol__is_idle(const char *name)
{
	static const char * const idle_symbols[] = {
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	const char * const *entry;

	for (entry = idle_symbols; *entry != NULL; entry++) {
		if (strcmp(*entry, name) == 0)
			return true;
	}

	return false;
}

665
/*
 * kallsyms__parse() callback: turn one kallsyms line into a struct symbol
 * and insert it into the dso's (passed via @arg) symbol tree.  Symbol types
 * we don't care about are skipped.  Returns 0 or -ENOMEM.
 */
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = &dso->symbols;

	if (!symbol_type__filter(type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}

702
/*
 * Redistribute the symbols parsed from kallsyms (all currently in @dso's
 * tree) onto the kcore-derived maps in @kmaps: each symbol is moved to the
 * map covering its address, rebased to that map's file offset, and clipped
 * to the map's end; symbols with no covering map are dropped.  Returns the
 * number of symbols kept, or -1 if @kmaps is NULL.
 */
static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
{
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached old_root = dso->symbols;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	/* Start with an empty tree; symbols are re-homed one by one. */
	*root = RB_ROOT_CACHED;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		/* Strip the "\tmodule_name" suffix kallsyms appends. */
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}

		/* Rebase from absolute address to map file offset. */
		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end > curr_map->end)
			pos->end = curr_map->end;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols, pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}

750 751 752 753 754
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 *
 * @delta compensates for a boot-time kernel relocation; @initial_map is the
 * map all symbols start out on.  Returns the number of symbols kept
 * (in-place plus moved), or -1 on error.
 */
static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
				      struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = initial_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		/* kallsyms tags module symbols with "\tmodule_name". */
		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != initial_map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in * kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso);
				}

				curr_map = map_groups__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
					         "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = initial_map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and it's
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (curr_map != initial_map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = initial_map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					"[guest.kernel].%d",
					kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					"[kernel].%d",
					kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			/*
			 * NOTE(review): kernel_range was already
			 * post-incremented in the snprintf above, so this
			 * second increment makes the [kernel].N numbering
			 * skip every other value — confirm this is intended.
			 */
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != initial_map) {
			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols, pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != initial_map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso);
	}

	return count + moved;
}
897

898 899
bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

916 917 918 919
/* One kernel module from /proc/modules: name and load address, kept in a
 * name-sorted rbtree. */
struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

922
/* Insert @mi into the name-sorted @modules tree (duplicates go right). */
static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

/* Empty the @modules tree, freeing each entry's name and the entry itself. */
static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

/* Look up module @name in the name-sorted @modules tree; NULL if absent. */
static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

976 977
/*
 * modules__parse() callback: record one module (name + start address) into
 * the rbtree passed via @arg.  Returns 0 or -ENOMEM.
 */
static int __read_proc_modules(void *arg, const char *name, u64 start,
			       u64 size __maybe_unused)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

/*
 * Populate @modules from a /proc/modules-style file, refusing restricted
 * files and cleaning up on parse failure.  Returns 0 or -1.
 */
static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052
/*
 * Compare two /proc/modules-style files.  Returns 0 when both list exactly
 * the same modules at the same addresses, -1 otherwise (including when
 * either file cannot be read).
 */
int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	/* Both trees are name-sorted, so walk them in lock-step. */
	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	/* Equal only if both walks finished together with no mismatch. */
	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

1053 1054
/* First map of the group, or NULL if empty. */
struct map *map_groups__first(struct map_groups *mg)
{
	return maps__first(&mg->maps);
}

1058
/*
 * Check that every kernel-module map in @kmaps corresponds to a module
 * listed in @filename (a /proc/modules-style file) loaded at the same
 * address.  Returns 0 when consistent, -EINVAL on mismatch, or the
 * read_proc_modules() error.
 */
static int do_validate_kcore_modules(const char *filename,
				  struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		/* Only kernel-module maps are checked. */
		if (!__map__is_kmodule(old_map)) {
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}

1093
/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory: given e.g. "/proc/kallsyms" and base_name "modules", write
 * "/proc/modules" into @filename.  Returns false when @kallsyms_filename
 * has no directory component or its basename is not exactly "kallsyms".
 * Assumes @filename is large enough for the result — caller's contract.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *slash;

	strcpy(filename, kallsyms_filename);

	slash = strrchr(filename, '/');
	if (slash == NULL)
		return false;

	if (strcmp(slash + 1, "kallsyms") != 0)
		return false;

	/* Same directory, different basename. */
	strcpy(slash + 1, base_name);
	return true;
}

1118 1119 1120
/*
 * Validate @map's kmaps against the "modules" file that lives next to
 * @kallsyms_filename.  Returns 0 when consistent, -EINVAL otherwise.
 */
static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

1137 1138 1139 1140 1141
/*
 * Verify that @kallsyms_filename matches the running layout of @map: the
 * relocation reference symbol (if any) must be at the recorded address,
 * and the adjacent modules file must agree with the module maps.
 * Returns 0 on success, -EINVAL/-ENOENT on mismatch.
 */
static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

1158 1159 1160 1161 1162 1163 1164 1165 1166 1167
/* Context for kcore_mapfn(): target dso plus the list collecting new maps. */
struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

/*
 * kcore program-header callback: create a map for the segment
 * [start, start+len) at file offset @pgoff and queue it on md->maps.
 * Returns 0 or -ENOMEM.
 */
static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

1180 1181 1182 1183
/*
 * Merges map into map_groups by splitting the new map
 * within the existing map regions.
 *
 * Existing maps keep their ranges; @new_map is trimmed/split to cover only
 * the gaps between them, and all resulting pieces are inserted.  Takes
 * ownership of @new_map (it is either inserted or dropped).  Returns 0 or
 * -ENOMEM.  Note: assumes the group's maps are iterated in ascending
 * address order.
 */
int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
{
	struct map *old_map;
	LIST_HEAD(merged);

	for (old_map = map_groups__first(kmaps); old_map;
	     old_map = map_groups__next(old_map)) {

		/* no overload with this one */
		if (new_map->end < old_map->start ||
		    new_map->start >= old_map->end)
			continue;

		if (new_map->start < old_map->start) {
			/*
			 * |new......
			 *       |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				new_map->end = old_map->start;
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map *m = map__clone(new_map);

				if (!m)
					return -ENOMEM;

				m->end = old_map->start;
				list_add_tail(&m->node, &merged);
				new_map->start = old_map->end;
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				new_map->start = old_map->end;
			}
		}
	}

	while (!list_empty(&merged)) {
		old_map = list_entry(merged.next, struct map, node);
		list_del_init(&old_map->node);
		map_groups__insert(kmaps, old_map);
		map__put(old_map);
	}

	if (new_map) {
		map_groups__insert(kmaps, new_map);
		map__put(new_map);
	}
	return 0;
}

1259 1260 1261
/*
 * Rebuild the kernel maps for @dso from the kcore image found next to
 * @kallsyms_filename so object code can later be read straight from kcore
 * via dso__data_read_addr().  Requires @map to be the main kernel map and
 * the running kernel to still match the kallsyms snapshot (addresses and
 * module layout validated first).  Returns 0 on success, -EINVAL otherwise.
 */
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
1262
	struct map_groups *kmaps = map__kmaps(map);
1263 1264
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
1265
	struct machine *machine;
1266 1267 1268
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
1269
	u64 stext;
1270

1271 1272 1273
	if (!kmaps)
		return -EINVAL;

1274 1275
	machine = kmaps->machine;

1276
	/* This function requires that the map is the kernel map */
1277
	if (!__map__is_kernel(map))
1278 1279
		return -EINVAL;

1280 1281 1282 1283
	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

1284 1285
	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
1286 1287 1288 1289 1290 1291
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
1292
	if (fd < 0) {
1293 1294
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
1295
		return -EINVAL;
1296
	}
1297 1298

	/* Read new maps into temporary lists */
1299
	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1300 1301 1302
			      &is_64_bit);
	if (err)
		goto out_err;
1303
	dso->is_64_bit = is_64_bit;
1304 1305 1306 1307 1308 1309 1310

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
1311
	old_map = map_groups__first(kmaps);
1312 1313 1314
	while (old_map) {
		struct map *next = map_groups__next(old_map);

1315 1316 1317 1318 1319 1320
		/*
		 * We need to preserve eBPF maps even if they are
		 * covered by kcore, because we need to access
		 * eBPF dso for source data.
		 */
		if (old_map != map && !__map__is_bpf_prog(old_map))
1321 1322 1323
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}
1324
	machine->trampolines_mapped = false;
1325

1326 1327 1328 1329 1330 1331 1332
	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		list_for_each_entry(new_map, &md.maps, node) {
			if (stext >= new_map->start && stext < new_map->end) {
				replacement_map = new_map;
				break;
			}
1333 1334 1335 1336 1337 1338 1339 1340 1341
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
1342
		list_del_init(&new_map->node);
1343 1344 1345 1346 1347 1348 1349
		if (new_map == replacement_map) {
			/* Retarget the kernel map itself to the kcore layout. */
			map->start	= new_map->start;
			map->end	= new_map->end;
			map->pgoff	= new_map->pgoff;
			map->map_ip	= new_map->map_ip;
			map->unmap_ip	= new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
1350
			map__get(map);
1351 1352
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
1353
			map__put(map);
1354
			map__put(new_map);
1355
		} else {
1356 1357 1358 1359 1360 1361 1362
			/*
			 * Merge kcore map into existing maps,
			 * and ensure that current maps (eBPF)
			 * stay intact.
			 */
			if (map_groups__merge_in(kmaps, new_map))
				goto out_err;
1363 1364 1365
		}
	}

1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378
	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

1379 1380 1381 1382 1383
	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1384
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1385
	else
1386
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
1387
	dso__set_long_name(dso, strdup(kcore_filename), true);
1388 1389 1390

	close(fd);

1391
	if (map->prot & PROT_EXEC)
1392 1393 1394 1395 1396 1397 1398 1399 1400
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	/* Note: 'map' is reused as an iterator; the parameter isn't needed anymore. */
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
1401
		list_del_init(&map->node);
1402
		map__put(map);
1403 1404 1405 1406 1407
	}
	close(fd);
	return -EINVAL;
}

1408 1409 1410 1411
/*
 * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
 * delta based on the relocation reference symbol.
 */
1412
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1413 1414 1415 1416 1417 1418
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

1419
	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1420 1421 1422 1423 1424 1425
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

1426
int __dso__load_kallsyms(struct dso *dso, const char *filename,
1427
			 struct map *map, bool no_kcore)
1428
{
1429
	struct kmap *kmap = map__kmap(map);
1430 1431
	u64 delta = 0;

1432 1433 1434
	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

1435 1436 1437
	if (!kmap || !kmap->kmaps)
		return -1;

1438
	if (dso__load_all_kallsyms(dso, filename) < 0)
1439 1440
		return -1;

1441
	if (kallsyms__delta(kmap, filename, &delta))
1442 1443
		return -1;

1444 1445
	symbols__fixup_end(&dso->symbols);
	symbols__fixup_duplicate(&dso->symbols);
1446

1447
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1448
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1449
	else
1450
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1451

1452
	if (!no_kcore && !dso__load_kcore(dso, map, filename))
1453
		return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1454
	else
1455
		return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1456 1457
}

1458
int dso__load_kallsyms(struct dso *dso, const char *filename,
1459
		       struct map *map)
1460
{
1461
	return __dso__load_kallsyms(dso, filename, map, false);
1462 1463
}

1464
static int dso__load_perf_map(const char *map_path, struct dso *dso)
1465 1466 1467 1468 1469 1470
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

1471
	file = fopen(map_path, "r");
1472 1473 1474 1475
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
1476
		u64 start, size;
1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

1501
		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1502 1503 1504 1505

		if (sym == NULL)
			goto out_delete_line;

1506
		symbols__insert(&dso->symbols, sym);
1507
		nr_syms++;
1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
1545
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1546
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1547
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1548 1549
		/*
		 * kernel modules know their symtab type - it's set when
1550
		 * creating a module dso in machine__findnew_module_map().
1551 1552 1553 1554
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1555
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1556 1557
		return true;

1558
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
1559 1560 1561 1562 1563 1564
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
/*
 * Locate the perf-<pid>.map file for a task, looking in two places: inside
 * the task's own mount namespace (named after its innermost pid-namespace
 * tgid), and failing that, in our mount namespace using our view of its
 * tgid.  On the fallback path *nsip is replaced with a copy that has
 * need_setns cleared.  Returns 0 when a candidate path was written to
 * @filebuf, -1 otherwise.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nsinfo *nsi = *nsip;
	struct nsinfo *nnsi;
	struct nscookie nsc;
	int rc = -1;

	if (nsi->need_setns) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return 0;
	}

	/* Fall back to our own mount namespace. */
	nnsi = nsinfo__copy(nsi);
	if (nnsi == NULL)
		return rc;

	nsinfo__put(nsi);
	nnsi->need_setns = false;
	snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
	*nsip = nnsi;
	return 0;
}

1603
/*
 * Load symbols for @dso.  Kernel dsos are dispatched to the
 * kallsyms/vmlinux loaders, /tmp/perf-<pid>.map files to the perf-map
 * parser; everything else walks binary_type_symtab[] looking for a symbol
 * source (symtab/dynsym) and a runtime image (for PLT synthesis).
 * Serialized by dso->lock; runs inside the dso's mount namespace.
 * Returns the number of symbols loaded, 1 if already loaded, < 0 on error.
 */
int dso__load(struct dso *dso, struct map *map)
1604
{
1605
	char *name;
1606
	int ret = -1;
1607
	u_int i;
1608
	struct machine *machine;
1609
	char *root_dir = (char *) "";
1610 1611 1612
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1613
	bool kmod;
1614
	bool perfmap;
1615
	unsigned char build_id[BUILD_ID_SIZE];
1616
	struct nscookie nsc;
1617 1618 1619 1620 1621 1622 1623 1624 1625 1626
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		/* The map file may live in the process' own mount namespace. */
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}
1627

1628
	nsinfo__mountns_enter(dso->nsinfo, &nsc);
1629 1630 1631
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
1632
	if (dso__loaded(dso)) {
1633 1634 1635
		ret = 1;
		goto out;
	}
1636

1637 1638 1639 1640 1641
	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

1642 1643
	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
1644
			ret = dso__load_kernel_sym(dso, map);
1645
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1646
			ret = dso__load_guest_kernel_sym(dso, map);
1647

1648 1649
		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
1650 1651
		goto out;
	}
1652

1653
	dso->adjust_symbols = 0;
1654

1655
	if (perfmap) {
1656
		ret = dso__load_perf_map(map_path, dso);
1657 1658
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
1659
		goto out;
1660 1661
	}

1662 1663 1664
	if (machine)
		root_dir = machine->root_dir;

1665 1666
	name = malloc(PATH_MAX);
	if (!name)
1667
		goto out;
1668

1669
	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1670 1671 1672
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1673

1674 1675 1676 1677 1678

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
1679
	if (!dso->has_build_id &&
1680 1681 1682
	    is_regular_file(dso->long_name)) {
	    __symbol__join_symfs(name, PATH_MAX, dso->long_name);
	    if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1683
		dso__set_build_id(dso, build_id);
1684
	}
1685

1686 1687
	/*
	 * Iterate over candidate debug images.
1688 1689
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
1690
	 */
1691
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1692 1693
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
1694
		bool is_reg;
1695
		bool nsexit;
1696
		int sirc = -1;
1697

1698
		enum dso_binary_type symtab_type = binary_type_symtab[i];
1699

1700 1701 1702
		/* Build-id cache entries live outside the dso's mount namespace. */
		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

1703 1704 1705
		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

1706 1707
		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
1708
			continue;
1709

1710
		if (nsexit)
1711 1712 1713
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
1714 1715
		if (is_reg)
			sirc = symsrc__init(ss, dso, name, symtab_type);
1716

1717
		if (nsexit)
1718 1719
			nsinfo__mountns_enter(dso->nsinfo, &nsc);

1720
		if (!is_reg || sirc < 0)
1721
			continue;
1722

1723 1724 1725
		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
1726 1727
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
1728 1729
		}

1730 1731 1732
		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
1733
		}
1734

1735 1736
		if (next_slot) {
			ss_pos++;
1737

1738 1739
			if (syms_ss && runtime_ss)
				break;
1740 1741
		} else {
			symsrc__destroy(ss);
1742
		}
1743

1744
	}
1745

1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

1757
	if (syms_ss)
1758
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1759
	else
1760 1761
		ret = -1;

1762
	if (ret > 0) {
1763 1764
		int nr_plt;

1765
		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1766 1767
		if (nr_plt > 0)
			ret += nr_plt;
1768 1769
	}

1770 1771 1772
	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
1773
	free(name);
1774
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1775 1776
		ret = 0;
out:
1777
	dso__set_loaded(dso);
1778
	pthread_mutex_unlock(&dso->lock);
1779
	nsinfo__mountns_exit(&nsc);
1780

1781 1782 1783
	return ret;
}

1784
struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1785
{
1786
	struct maps *maps = &mg->maps;
1787
	struct map *map;
1788
	struct rb_node *node;
1789

1790
	down_read(&maps->lock);
1791

1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803
	for (node = maps->names.rb_node; node; ) {
		int rc;

		map = rb_entry(node, struct map, rb_node_name);

		rc = strcmp(map->dso->short_name, name);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else

1804
			goto out_unlock;
1805 1806
	}

1807 1808 1809
	map = NULL;

out_unlock:
1810
	up_read(&maps->lock);
1811
	return map;
1812 1813
}

1814
int dso__load_vmlinux(struct dso *dso, struct map *map,
1815
		      const char *vmlinux, bool vmlinux_allocated)
1816
{
1817 1818
	int err = -1;
	struct symsrc ss;
1819
	char symfs_vmlinux[PATH_MAX];
1820
	enum dso_binary_type symtab_type;
1821

1822 1823 1824
	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
1825
		symbol__join_symfs(symfs_vmlinux, vmlinux);
1826

1827
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1828
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1829
	else
1830
		symtab_type = DSO_BINARY_TYPE__VMLINUX;
1831

1832
	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1833 1834
		return -1;

1835
	err = dso__load_sym(dso, map, &ss, &ss, 0);
1836
	symsrc__destroy(&ss);
1837

1838
	if (err > 0) {
1839
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1840
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1841
		else
1842
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1843
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1844
		dso__set_loaded(dso);
1845
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
1846
	}
1847

1848 1849 1850
	return err;
}

1851
int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1852 1853
{
	int i, err = 0;
1854
	char *filename = NULL;
1855

1856 1857 1858 1859
	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1860
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1861 1862 1863 1864
		if (err > 0)
			goto out;
	}

1865
	if (!symbol_conf.ignore_vmlinux_buildid)
1866
		filename = dso__build_id_filename(dso, NULL, 0, false);
1867
	if (filename != NULL) {
1868
		err = dso__load_vmlinux(dso, map, filename, true);
1869
		if (err > 0)
1870 1871 1872 1873
			goto out;
		free(filename);
	}
out:
1874 1875 1876
	return err;
}

1877 1878 1879 1880 1881 1882 1883
/* lsdir() filter: keep only directories, excluding "." and "..". */
static bool visible_dir_filter(const char *name, struct dirent *d)
{
	return d->d_type == DT_DIR && lsdir_no_dot_filter(name, d);
}

1884 1885 1886 1887
/*
 * Scan the subdirectories of @dir for a kallsyms file whose addresses still
 * match the running kernel.  On success @dir is overwritten with the full
 * path of that kallsyms file and 0 is returned, otherwise -1.
 */
static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	struct strlist *subdirs;
	struct str_node *nd;
	char kallsyms_path[PATH_MAX];
	int ret = -1;

	subdirs = lsdir(dir, visible_dir_filter);
	if (subdirs == NULL)
		return -1;

	strlist__for_each_entry(nd, subdirs) {
		scnprintf(kallsyms_path, sizeof(kallsyms_path),
			  "%s/%s/kallsyms", dir, nd->s);
		if (validate_kcore_addresses(kallsyms_path, map))
			continue;
		strlcpy(dir, kallsyms_path, dir_sz);
		ret = 0;
		break;
	}

	strlist__delete(subdirs);

	return ret;
}

1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923
/*
 * Report whether @file can actually be opened for reading.  access(R_OK) is
 * deliberately avoided: it checks the real UID/GID, while open() honours the
 * effective credentials and capabilities (e.g. /proc/kcore requires
 * CAP_SYS_RAWIO), so a throwaway open() is the accurate probe.
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);
	bool readable = fd >= 0;

	if (readable)
		close(fd);
	return readable;
}

1924 1925 1926
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
1927
	char sbuild_id[SBUILD_ID_SIZE];
1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

1943
	/* Try a fast path for /proc/kallsyms if possible */
1944 1945
	if (is_host) {
		/*
1946 1947 1948 1949 1950
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
1951
		 */
1952 1953 1954
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
1955 1956
	}

1957 1958
	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

1959
	/* Find kallsyms in build-id cache with kcore */
1960 1961 1962
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

1963 1964 1965
	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

1966 1967 1968 1969 1970 1971 1972
	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
1973
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1974 1975 1976 1977 1978 1979 1980 1981
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}

1982
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1983
{
1984
	int err;
1985 1986
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
1987
	/*
1988 1989
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fallback to another vmlinux (a
	 * x86_86 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
2002 2003 2004 2005 2006
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

2007
	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
2008
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2009
	}
2010

2011
	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2012
		err = dso__load_vmlinux_path(dso, map);
2013
		if (err > 0)
2014
			return err;
2015 2016
	}

2017 2018 2019 2020
	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

2021 2022 2023
	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;
2024

2025
	kallsyms_filename = kallsyms_allocated_filename;
2026

2027
do_kallsyms:
2028
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
2029 2030
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
2031
	free(kallsyms_allocated_filename);
2032

2033
	if (err > 0 && !dso__is_kcore(dso)) {
2034
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2035
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2036 2037
		map__fixup_start(map);
		map__fixup_end(map);
2038
	}
2039

2040 2041 2042
	return err;
}

2043
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2044 2045 2046
{
	int err;
	const char *kallsyms_filename = NULL;
2047
	struct machine *machine;
2048 2049 2050 2051 2052 2053
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map hasn't the point to groups\n");
		return -1;
	}
2054
	machine = map->groups->machine;
2055

2056
	if (machine__is_default_guest(machine)) {
2057 2058 2059 2060 2061 2062
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use file guest_kallsyms inputted by user on commandline
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
2063
			err = dso__load_vmlinux(dso, map,
2064
						symbol_conf.default_guest_vmlinux_name,
2065
						false);
2066
			return err;
2067 2068 2069 2070 2071 2072
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
2073
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
2074 2075 2076
		kallsyms_filename = path;
	}

2077
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
2078
	if (err > 0)
2079
		pr_debug("Using %s for symbols\n", kallsyms_filename);
2080
	if (err > 0 && !dso__is_kcore(dso)) {
2081
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2082
		dso__set_long_name(dso, machine->mmap_name, false);
2083 2084 2085 2086 2087 2088
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
2089

2090 2091
static void vmlinux_path__exit(void)
{
2092 2093
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2094
	vmlinux_path__nr_entries = 0;
2095

2096
	zfree(&vmlinux_path);
2097 2098
}

2099 2100 2101 2102 2103 2104 2105 2106 2107
/* Fixed vmlinux locations, tried unconditionally. */
static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

/* Release-specific templates; "%s" is filled with the kernel version. */
static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
2108 2109
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121
};

/* Append a copy of @new_entry to vmlinux_path[]; -1 when strdup() fails. */
static int vmlinux_path__add(const char *new_entry)
{
	char *entry = strdup(new_entry);

	if (entry == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries++] = entry;
	return 0;
}

2122
static int vmlinux_path__init(struct perf_env *env)
2123 2124 2125
{
	struct utsname uts;
	char bf[PATH_MAX];
2126
	char *kernel_version;
2127
	unsigned int i;
2128

2129 2130
	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
2131 2132 2133
	if (vmlinux_path == NULL)
		return -1;

2134 2135 2136
	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;
2137

2138
	/* only try kernel version if no symfs was given */
2139 2140 2141
	if (symbol_conf.symfs[0] != 0)
		return 0;

2142 2143 2144 2145 2146 2147 2148 2149
	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}
2150

2151 2152 2153 2154 2155
	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}
2156 2157 2158 2159 2160 2161 2162 2163

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

D
David Ahern 已提交
2164
int setup_list(struct strlist **list, const char *list_str,
2165 2166 2167 2168 2169
		      const char *list_name)
{
	if (list_str == NULL)
		return 0;

2170
	*list = strlist__new(list_str, NULL);
2171 2172 2173 2174
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
2175 2176

	symbol_conf.has_filter = true;
2177 2178 2179
	return 0;
}

2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193
/*
 * Parse @list_str into the integer list @*list.  A NULL string means the
 * option was not given; a parse failure is reported against @list_name.
 */
int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (*list == NULL) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

2194 2195 2196
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
2197
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2198

2199 2200
	if (fp != NULL) {
		char line[8];
2201

2202
		if (fgets(line, sizeof(line), fp) != NULL)
2203 2204 2205
			value = perf_cap__capable(CAP_SYSLOG) ?
					(atoi(line) >= 2) :
					(atoi(line) != 0);
2206

2207
		fclose(fp);
2208 2209
	}

2210 2211 2212 2213 2214 2215
	/* Per kernel/kallsyms.c:
	 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
	 */
	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
		value = true;

2216 2217 2218
	return value;
}

2219 2220
int symbol__annotation_init(void)
{
2221 2222 2223
	if (symbol_conf.init_annotation)
		return 0;

2224 2225 2226 2227 2228 2229 2230 2231 2232 2233
	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be init before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}

2234
/*
 * One-time initialization of the symbol subsystem: freeze priv_size, set up
 * ELF handling and the vmlinux search path, build the user-supplied filter
 * lists and normalize the symfs root.  Idempotent; returns 0 on success,
 * -1 on failure with any partially-built lists torn down.
 */
int symbol__init(struct perf_env *env)
2235
{
2236 2237
	const char *symfs;

2238 2239 2240
	if (symbol_conf.initialized)
		return 0;

2241
	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2242

2243 2244
	symbol__elf_init();

2245 2246 2247
	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));
2248

2249
	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2250 2251
		return -1;

2252 2253 2254 2255 2256
	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non valid --field-separator argument\n");
		return -1;
	}

2257 2258 2259 2260 2261 2262 2263 2264
	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

2265 2266 2267 2268 2269 2270 2271 2272
	if (setup_intlist(&symbol_conf.pid_list,
		       symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
		       symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

2273 2274
	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
2275
		goto out_free_tid_list;
2276

2277 2278 2279 2280
	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292
	/*
	 * A path to symbols of "/" is identical to ""
	 * reset here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

2293 2294
	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

2295
	symbol_conf.initialized = true;
2296
	return 0;
2297

2298 2299
out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
2300 2301 2302 2303
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
2304 2305
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
2306 2307
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
2308
	return -1;
2309 2310
}

2311 2312
/*
 * Tear down everything set up by symbol__init(). Idempotent: a second
 * call (or a call without prior init) returns immediately.
 */
void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;

	/* Release the filter lists built from the command-line options. */
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	vmlinux_path__exit();

	/* Clear the dangling pointers so stale state can't be reused. */
	symbol_conf.sym_list = NULL;
	symbol_conf.dso_list = NULL;
	symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;

	symbol_conf.initialized = false;
}

/*
 * Option callback for --symfs: record the alternate symbol filesystem
 * root and point the buildid cache at <symfs>/.debug.
 *
 * Returns 0 on success, -ENOMEM if either allocation fails.
 */
int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *buildid_dir = NULL;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/*
	 * A symfs was given: skip the locally configured cache and
	 * redirect the buildid dir to <symfs>/.debug instead.
	 */
	if (asprintf(&buildid_dir, "%s/%s", dir, ".debug") < 0)
		return -ENOMEM;

	set_buildid_dir(buildid_dir);
	free(buildid_dir);
	return 0;
}

/* Take an extra reference on @mi; a NULL argument passes through unchanged. */
struct mem_info *mem_info__get(struct mem_info *mi)
{
	if (!mi)
		return NULL;

	refcount_inc(&mi->refcnt);
	return mi;
}

/* Drop one reference on @mi, freeing it when the last reference goes away. */
void mem_info__put(struct mem_info *mi)
{
	if (!mi)
		return;

	if (refcount_dec_and_test(&mi->refcnt))
		free(mi);
}

/* Allocate a zeroed mem_info with an initial refcount of 1, or NULL on OOM. */
struct mem_info *mem_info__new(void)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (mi == NULL)
		return NULL;

	refcount_set(&mi->refcnt, 1);
	return mi;
}

/* Take an extra reference on @bi; a NULL argument passes through unchanged. */
struct block_info *block_info__get(struct block_info *bi)
{
	if (!bi)
		return NULL;

	refcount_inc(&bi->refcnt);
	return bi;
}

/* Drop one reference on @bi, freeing it when the last reference goes away. */
void block_info__put(struct block_info *bi)
{
	if (!bi)
		return;

	if (refcount_dec_and_test(&bi->refcnt))
		free(bi);
}

/* Allocate a zeroed block_info with an initial refcount of 1, or NULL on OOM. */
struct block_info *block_info__new(void)
{
	struct block_info *bi = zalloc(sizeof(*bi));

	if (bi == NULL)
		return NULL;

	refcount_set(&bi->refcnt, 1);
	return bi;
}