// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}
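
/* Example (illustrative, not part of this file): callers can route all
 * libbpf output, including LIBBPF_DEBUG messages, through their own
 * callback:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */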

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

#define STRERR_BUFSIZE  128

#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while (0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
	/* BPF_F_MMAPABLE is supported for arrays */
	__u32 array_mmap:1;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		int map_idx;
		int sym_off;
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	char *pin_path;
	bool pinned;
	bool reused;
};

struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;
	bool relaxed_core_relocs;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * Every loaded bpf_object is linked into a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}
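
/* E.g. (illustrative): a program in section "cgroup/skb" gets pin name
 * "cgroup_skb", so bpf_object__pin_programs() can pin it as a single
 * file rather than a nested directory.
 */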

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warn("corrupted section '%s', size: %zu\n",
			section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warn("failed to alloc name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warn("failed to alloc insns for prog under section %s\n",
			section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warn("failed to alloc a new program under section '%s'\n",
			section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warn("failed to get sym name string for prog %s\n",
					prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warn("failed to find sym for prog %s\n",
				prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warn("failed to allocate memory for prog sym %s\n",
				name);
			return -ENOMEM;
		}
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}
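
/* E.g. (illustrative): a release string "5.4.0-26-generic" parses as
 * major=5, minor=4, patch=0, i.e. KERNEL_VERSION(5, 4, 0) == 0x050400.
 */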

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. Otherwise we would have to duplicate
	 * the buffer to avoid the user freeing it before ELF
	 * processing finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fds with -1 so we won't close an incorrect fd (fd=0 is
	 * stdin) on failure (zclose won't close a negative fd).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, Elf_Data *data, void **data_buff)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
	if (obj->caps.array_mmap)
		def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map_name, map->sec_idx, map->sec_offset, def->map_flags);

	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warn("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (!obj->caps.global_data)
		return 0;
	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data,
						    &obj->sections.data);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata,
						    &obj->sections.rodata);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    obj->efile.bss, NULL);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d\n",
			obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of maps and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n",
			obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section.  */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero, since new maps are
		 * zero-initialized in bpf_object__add_map().
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}
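
/* E.g. (illustrative): for
 *
 *	typedef const volatile struct foo my_foo_t;
 *
 * resolving a my_foo_t type ID walks TYPEDEF -> CONST -> VOLATILE and
 * returns the underlying struct foo, with *res_id set to its type ID.
 */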

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
			map_name, name, btf_kind(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
			map_name, name, btf_kind(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}
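
/* For illustration (hypothetical map, not part of this file): a
 * BTF-defined map in the ".maps" section encodes its attributes with
 * exactly this pointer-to-array convention:
 *
 *	struct {
 *		int (*type)[BPF_MAP_TYPE_ARRAY];
 *		int (*max_entries)[64];
 *		__u32 *key;
 *		__u64 *value;
 *	} my_map SEC(".maps");
 *
 * get_map_field_int() recovers type == BPF_MAP_TYPE_ARRAY and
 * max_entries == 64 from the array dimensions, while "key" and "value"
 * are resolved through the pointed-to types.
 */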

static int build_map_pin_path(struct bpf_map *map, const char *path)
{
	char buf[PATH_MAX];
	int err, len;

	if (!path)
		path = "/sys/fs/bpf";

	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	err = bpf_map__set_pin_path(map, buf);
	if (err)
		return err;

	return 0;
}
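
/* E.g. (illustrative): with path == NULL, a map named "my_map" gets the
 * default pin path "/sys/fs/bpf/my_map".
 */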

static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict,
					 const char *pin_root_path)
{
	const struct btf_type *var, *def, *t;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const struct btf_member *m;
	const char *map_name;
	struct bpf_map *map;
	int vlen, i;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	map_name = btf__name_by_offset(obj->btf, var->name_off);
	vlen = btf_vlen(var);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warn("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warn("map '%s': unexpected var kind %u.\n",
			map_name, btf_kind(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warn("map '%s': unsupported var linkage %u.\n",
			map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warn("map '%s': unexpected def kind %u.\n",
			map_name, btf_kind(def));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warn("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map_name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map_name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map_name, map->def.map_flags);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map_name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %u.\n",
					map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %zd.\n",
				 map_name, t->type, (ssize_t)sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %zd.\n",
					map_name, map->def.key_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map_name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %u.\n",
					map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %zd.\n",
				 map_name, t->type, (ssize_t)sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %zd.\n",
					map_name, map->def.value_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;
			int err;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &val))
				return -EINVAL;
			pr_debug("map '%s': found pinning = %u.\n",
				 map_name, val);

			if (val != LIBBPF_PIN_NONE &&
			    val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map_name, val);
				return -EINVAL;
			}
			if (val == LIBBPF_PIN_BY_NAME) {
				err = build_map_pin_path(map, pin_root_path);
				if (err) {
					pr_warn("map '%s': couldn't build pin path.\n",
						map_name);
					return err;
				}
			}
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n",
					map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map_name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}

static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
					  const char *pin_root_path)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d (%s)\n",
			obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			break;
		}
	}

	if (!sec) {
		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict,
						    pin_root_path);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
				 const char *pin_root_path)
{
	bool strict = !relaxed_maps;
	int err;

	err = bpf_object__init_user_maps(obj, strict);
	if (err)
		return err;

	err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
	if (err)
		return err;

	err = bpf_object__init_global_data_maps(obj);
	if (err)
		return err;

	if (obj->nr_maps) {
		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
		      compare_bpf_map);
	}
	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;

	if (!obj->btf || (has_func && has_datasec))
		return;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if (!has_datasec && btf_is_var(t)) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		}
	}
}

static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext)
		return;

	if (!obj->caps.btf_func) {
		btf_ext__free(obj->btf_ext);
		obj->btf_ext = NULL;
	}
}

static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0;
}

static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	bool btf_required = bpf_object__is_btf_mandatory(obj);
	int err = 0;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			pr_warn("Error loading ELF section %s: %ld.\n",
				BTF_ELF_SEC, PTR_ERR(obj->btf));
			goto out;
		}
		err = btf__finalize_data(obj, obj->btf);
		if (err) {
			pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
			goto out;
		}
	}
	if (btf_ext_data) {
		if (!obj->btf) {
			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
				BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err || IS_ERR(obj->btf)) {
		if (btf_required)
			err = err ? : PTR_ERR(obj->btf);
		else
			err = 0;
		if (!IS_ERR_OR_NULL(obj->btf))
			btf__free(obj->btf);
		obj->btf = NULL;
	}
	if (btf_required && !obj->btf) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return err == 0 ? -ENOENT : err;
	}
	return 0;
}

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	int err = 0;

	if (!obj->btf)
		return 0;

	bpf_object__sanitize_btf(obj);
	bpf_object__sanitize_btf_ext(obj);

	err = btf__load(obj->btf);
	if (err) {
		pr_warn("Error loading %s into kernel: %d.\n",
			BTF_ELF_SEC, err);
		btf__free(obj->btf);
		obj->btf = NULL;
		/* btf_ext can't exist without btf, so free it as well */
		if (obj->btf_ext) {
			btf_ext__free(obj->btf_ext);
			obj->btf_ext = NULL;
		}

		if (bpf_object__is_btf_mandatory(obj))
			return err;
	}
	return 0;
}

static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
				   const char *pin_root_path)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warn("bpf: multiple SYMTAB in %s\n",
					obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}
			obj->efile.symbols = data;
			obj->efile.strtabidx = sh.sh_link;
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_program(obj, data->d_buf,
							      data->d_size,
							      name, idx);
				if (err) {
					char errmsg[STRERR_BUFSIZE];
					char *cp;

					cp = libbpf_strerror_r(-err, errmsg,
							       sizeof(errmsg));
					pr_warn("failed to alloc program %s (%s): %s",
						name, obj->path, cp);
					return err;
				}
			} else if (strcmp(name, ".data") == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, ".rodata") == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else {
				pr_debug("skip section(%d) %s\n", idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_sects = obj->efile.nr_reloc_sects;
			void *sects = obj->efile.reloc_sects;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			sects = reallocarray(sects, nr_sects + 1,
					     sizeof(*obj->efile.reloc_sects));
			if (!sects) {
				pr_warn("reloc_sects realloc failed\n");
				return -ENOMEM;
			}

			obj->efile.reloc_sects = sects;
			obj->efile.nr_reloc_sects++;

			obj->efile.reloc_sects[nr_sects].shdr = sh;
			obj->efile.reloc_sects[nr_sects].data = data;
		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
		pr_warn("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
	if (!err)
		err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
	if (!err)
		err = bpf_object__sanitize_and_load_btf(obj);
	if (!err)
		err = bpf_object__init_prog_names(obj);
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}

static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.data_shndx ||
	       shndx == obj->efile.bss_shndx ||
	       shndx == obj->efile.rodata_shndx;
}

static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.maps_shndx ||
	       shndx == obj->efile.btf_maps_shndx;
}

static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else
		return LIBBPF_MAP_UNSPEC;
}

static int bpf_program__record_reloc(struct bpf_program *prog,
				     struct reloc_desc *reloc_desc,
				     __u32 insn_idx, const char *name,
				     const GElf_Sym *sym, const GElf_Rel *rel)
{
	struct bpf_insn *insn = &prog->insns[insn_idx];
	size_t map_idx, nr_maps = prog->obj->nr_maps;
	struct bpf_object *obj = prog->obj;
	__u32 shdr_idx = sym->st_shndx;
	enum libbpf_map_type type;
	struct bpf_map *map;

	/* sub-program call relocation */
	if (insn->code == (BPF_JMP | BPF_CALL)) {
		if (insn->src_reg != BPF_PSEUDO_CALL) {
			pr_warn("incorrect bpf_call opcode\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		/* text_shndx can be 0, if no default "main" program exists */
		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
			pr_warn("bad call relo against section %u\n", shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (sym->st_value % 8) {
			pr_warn("bad call relo offset: %zu\n",
				(size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_CALL;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = sym->st_value;
		obj->has_pseudo_calls = true;
		return 0;
	}

	if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
		pr_warn("invalid relo for insns[%d].code 0x%x\n",
			insn_idx, insn->code);
		return -LIBBPF_ERRNO__RELOC;
	}
	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
		pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
			name, shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);

	/* generic map reference relocation */
	if (type == LIBBPF_MAP_UNSPEC) {
		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
			pr_warn("bad map relo against section %u\n",
				shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			map = &obj->maps[map_idx];
			if (map->libbpf_type != type ||
			    map->sec_idx != sym->st_shndx ||
			    map->sec_offset != sym->st_value)
				continue;
			pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
				 map_idx, map->name, map->sec_idx,
				 map->sec_offset, insn_idx);
			break;
		}
		if (map_idx >= nr_maps) {
			pr_warn("map relo failed to find map for sec %u, off %zu\n",
				shdr_idx, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_LD64;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->map_idx = map_idx;
		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
		return 0;
	}

	/* global data map relocation */
	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
		pr_warn("bad data relo against section %u\n", shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}
	if (!obj->caps.global_data) {
		pr_warn("relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
			name, insn_idx);
		return -LIBBPF_ERRNO__RELOC;
	}
	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
		map = &obj->maps[map_idx];
		if (map->libbpf_type != type)
			continue;
		pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
			 map_idx, map->name, map->sec_idx, map->sec_offset,
			 insn_idx);
		break;
	}
	if (map_idx >= nr_maps) {
		pr_warn("data relo failed to find map for sec %u\n",
			shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	reloc_desc->type = RELO_DATA;
	reloc_desc->insn_idx = insn_idx;
	reloc_desc->map_idx = map_idx;
	reloc_desc->sym_off = sym->st_value;
	return 0;
}

static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int err, i, nrels;

	pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warn("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		const char *name;
		__u32 insn_idx;
		GElf_Sym sym;
		GElf_Rel rel;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warn("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn("relocation: symbol %"PRIx64" not found\n",
				GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (rel.r_offset % sizeof(struct bpf_insn))
			return -LIBBPF_ERRNO__FORMAT;

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				  sym.st_name) ? : "<?>";

		pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
			 (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info),
			 (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info),
			 GELF_ST_BIND(sym.st_info), sym.st_name, name,
			 insn_idx);

		err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
						insn_idx, name, &sym, &rel);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_map_def *def = &map->def;
	__u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* if it's BTF-defined map, we don't need to search for type IDs */
	if (map->sec_idx == obj->efile.btf_maps_shndx)
		return 0;

	if (!bpf_map__is_internal(map)) {
		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
					   def->value_size, &key_type_id,
					   &value_type_id);
	} else {
		/*
		 * LLVM annotates global data differently in BTF, that is,
		 * only as '.data', '.bss' or '.rodata'.
		 */
		ret = btf__find_by_name(obj->btf,
				libbpf_type_to_btf_name[map->libbpf_type]);
	}
	if (ret < 0)
		return ret;

	map->btf_key_type_id = key_type_id;
	map->btf_value_type_id = bpf_map__is_internal(map) ?
				 ret : value_type_id;
	return 0;
}
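
/* Illustrative sketch (BPF-side; assumes the BTF-defined map macros from
 * bpf_helpers.h are available): a map declared in the ".maps" section
 * carries its key/value types in BTF directly, so the name-based type ID
 * search above is skipped for it:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_hash SEC(".maps");
 */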

int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;
	map->reused = true;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return err;
}
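
/* Usage sketch (illustrative; object path, map name and pin path are
 * hypothetical): adopt an already pinned map before load so that this
 * object shares it instead of creating its own:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "shared");
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/shared");
 *
 *	if (map && pin_fd >= 0)
 *		err = bpf_map__reuse_fd(map, pin_fd);
 *	// on success, bpf_object__load() will skip creating this map
 */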

int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
{
	if (!map || !max_entries)
		return -EINVAL;

	/* If map already created, its attributes can't be changed. */
	if (map->fd >= 0)
		return -EBUSY;

	map->def.max_entries = max_entries;

	return 0;
}
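
/* Usage sketch (illustrative; map name is hypothetical): size a map
 * between open and load, e.g. one slot per possible CPU:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");
 *	int n = libbpf_num_possible_cpus();
 *
 *	if (map && n > 0)
 *		err = bpf_map__resize(map, n);
 */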

static int
bpf_object__probe_name(struct bpf_object *obj)
{
	struct bpf_load_program_attr attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure basic loading works */

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
			__func__, cp, errno);
		return -errno;
	}
	close(ret);

	/* now try the same program, but with the name */

	attr.name = "test";
	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.name = 1;
		close(ret);
	}

	return 0;
}

static int
bpf_object__probe_global_data(struct bpf_object *obj)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, errno);
		return -errno;
	}

	insns[0].imm = map;

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.global_data = 1;
		close(ret);
	}

	close(map);
	return 0;
}
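
/* The probe above corresponds to this BPF-side pattern (sketch; variable
 * name is hypothetical): a global variable access, which the compiler
 * lowers to a load from a hidden .data/.bss array map:
 *
 *	static int cnt;		// placed into the .bss map
 *	...
 *	cnt++;			// needs kernel global data support
 */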

static int bpf_object__probe_btf_func(struct bpf_object *obj)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_func = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_datasec = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int bpf_object__probe_array_mmap(struct bpf_object *obj)
{
	struct bpf_create_map_attr attr = {
		.map_type = BPF_MAP_TYPE_ARRAY,
		.map_flags = BPF_F_MMAPABLE,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = 1,
	};
	int fd;

	fd = bpf_create_map_xattr(&attr);
	if (fd >= 0) {
		obj->caps.array_mmap = 1;
		close(fd);
		return 1;
	}

	return 0;
}

static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	int (*probe_fn[])(struct bpf_object *obj) = {
		bpf_object__probe_name,
		bpf_object__probe_global_data,
		bpf_object__probe_btf_func,
		bpf_object__probe_btf_datasec,
		bpf_object__probe_array_mmap,
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
		ret = probe_fn[i](obj);
		if (ret < 0)
			pr_debug("Probe #%d failed with %d.\n", i, ret);
	}

	return 0;
}

static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
	struct bpf_map_info map_info = {};
	char msg[STRERR_BUFSIZE];
	__u32 map_info_len;

	map_info_len = sizeof(map_info);

	if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
		return false;
	}

	return (map_info.type == map->def.type &&
		map_info.key_size == map->def.key_size &&
		map_info.value_size == map->def.value_size &&
		map_info.max_entries == map->def.max_entries &&
		map_info.map_flags == map->def.map_flags);
}

static int
bpf_object__reuse_map(struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, pin_fd;

	pin_fd = bpf_obj_get(map->pin_path);
	if (pin_fd < 0) {
		err = -errno;
		if (err == -ENOENT) {
			pr_debug("found no pinned map to reuse at '%s'\n",
				 map->pin_path);
			return 0;
		}

		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("couldn't retrieve pinned map '%s': %s\n",
			map->pin_path, cp);
		return err;
	}

	if (!map_is_reuse_compat(map, pin_fd)) {
		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
			map->pin_path);
		close(pin_fd);
		return -EINVAL;
	}

	err = bpf_map__reuse_fd(map, pin_fd);
	if (err) {
		close(pin_fd);
		return err;
	}
	map->pinned = true;
	pr_debug("reused pinned map at '%s'\n", map->pin_path);

	return 0;
}
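
/* Usage sketch (illustrative; assumes bpf_map__set_pin_path() is exposed
 * by this libbpf version; the path is hypothetical): with a pin path set,
 * map creation below will first try to reuse a compatible pinned map and
 * otherwise create and auto-pin a fresh one:
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 */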

static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, zero = 0;
	__u8 *data;

	/* Nothing to do here since kernel already zero-initializes .bss map. */
	if (map->libbpf_type == LIBBPF_MAP_BSS)
		return 0;

	data = map->libbpf_type == LIBBPF_MAP_DATA ?
	       obj->sections.data : obj->sections.rodata;

	err = bpf_map_update_elem(map->fd, &zero, data, 0);
	/* Freeze .rodata map as read-only from syscall side. */
	if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
		err = bpf_map_freeze(map->fd);
		if (err) {
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warn("Error freezing map(%s) as read-only: %s\n",
				map->name, cp);
			err = 0;
		}
	}
	return err;
}

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	int nr_cpus = 0;
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		if (map->pin_path) {
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("error reusing pinned map %s\n",
					map->name);
				return err;
			}
		}

		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		if (obj->caps.name)
			create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
		    !def->max_entries) {
			if (!nr_cpus)
				nr_cpus = libbpf_num_possible_cpus();
			if (nr_cpus < 0) {
				pr_warn("failed to determine number of system CPUs: %d\n",
					nr_cpus);
				err = nr_cpus;
				goto err_out;
			}
			pr_debug("map '%s': setting size to %d\n",
				 map->name, nr_cpus);
			create_attr.max_entries = nr_cpus;
		} else {
			create_attr.max_entries = def->max_entries;
		}
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		if (bpf_map_type__is_map_in_map(def->type) &&
		    map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;

		if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && (create_attr.btf_key_type_id ||
				 create_attr.btf_value_type_id)) {
			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				map->name, cp, err);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = -errno;
err_out:
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to create map (name: '%s'): %s(%d)\n",
				map->name, cp, err);
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}

		if (bpf_map__is_internal(map)) {
			err = bpf_object__populate_internal_map(obj, map);
			if (err < 0) {
				zclose(*pfd);
				goto err_out;
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				pr_warn("failed to auto-pin map name '%s' at '%s'\n",
					map->name, map->pin_path);
				return err;
			}
		}

		pr_debug("created map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}

static int
check_btf_ext_reloc_err(struct bpf_program *prog, int err,
			void *btf_prog_info, const char *info_name)
{
	if (err != -ENOENT) {
		pr_warn("Error in loading %s for sec %s.\n",
			info_name, prog->section_name);
		return err;
	}

	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */

	if (btf_prog_info) {
		/*
		 * Some info has already been found, but a problem occurred
		 * in a later btf_ext reloc. We must error out.
		 */
2460 2461
		pr_warn("Error in relocating %s for sec %s.\n",
			info_name, prog->section_name);
		return err;
	}

	/* Had a problem loading the very first info. Ignore the rest. */
	pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
		info_name, prog->section_name, info_name);
	return 0;
}

static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name,  __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	if (!insn_offset || prog->line_info) {
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	return 0;
}

#define BPF_CORE_SPEC_MAX_LEN 64

/* represents BPF CO-RE field or array element accessor */
struct bpf_core_accessor {
	__u32 type_id;		/* struct/union type or array element type */
	__u32 idx;		/* field index or array index */
	const char *name;	/* field name or NULL for array accessor */
};

struct bpf_core_spec {
	const struct btf *btf;
	/* high-level spec: named fields and array indices only */
	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
	/* high-level spec length */
	int len;
	/* raw, low-level spec: 1-to-1 with accessor spec string */
	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
	/* raw spec length */
	int raw_len;
	/* field bit offset represented by spec */
	__u32 bit_offset;
};

static bool str_is_empty(const char *s)
{
	return !s || !s[0];
}

/*
 * Turn bpf_field_reloc into a low- and high-level spec representation,
 * validating correctness along the way, as well as calculating resulting
 * field bit offset, specified by accessor string. Low-level spec captures
 * every single level of nestedness, including traversing anonymous
 * struct/union members. High-level one only captures semantically meaningful
 * "turning points": named fields and array indicies.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int *x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of access string (it's
 * just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 */
static int bpf_core_spec_parse(const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	const struct btf_type *t;
	const char *name;
	__u32 id;
	__s64 sz;

	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;

	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	/* first spec value is always reloc type array index */
	t = skip_mods_and_typedefs(btf, type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	spec->spec[0].type_id = id;
	spec->spec[0].idx = access_idx;
	spec->len++;

	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				spec->spec[spec->len].type_id = id;
				spec->spec[spec->len].idx = access_idx;
				spec->spec[spec->len].name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t || access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
				type_id, spec_str, i, id, btf_kind(t));
			return -EINVAL;
		}
	}

	return 0;
}

static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before last triple underscore. Struct name part after last triple
 * underscore is ignored by BPF CO-RE relocation during relocation matching.
 */
static size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i;

	for (i = n - 5; i >= 0; i--) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
	}
	return n;
}
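
/* E.g. (sketch): bpf_core_essential_name_len("task_struct___2") == 11,
 * the length of "task_struct"; plain "task_struct" also yields 11, so
 * both names compare equal on their essential part.
 */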

/* dynamically sized list of type IDs */
struct ids_vec {
	__u32 *data;
	int len;
};

static void bpf_core_free_cands(struct ids_vec *cand_ids)
{
	free(cand_ids->data);
	free(cand_ids);
}

static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
					   __u32 local_type_id,
					   const struct btf *targ_btf)
{
	size_t local_essent_len, targ_essent_len;
	const char *local_name, *targ_name;
	const struct btf_type *t;
	struct ids_vec *cand_ids;
	__u32 *new_ids;
	int i, err, n;

	t = btf__type_by_id(local_btf, local_type_id);
	if (!t)
		return ERR_PTR(-EINVAL);

	local_name = btf__name_by_offset(local_btf, t->name_off);
	if (str_is_empty(local_name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(local_name);

	cand_ids = calloc(1, sizeof(*cand_ids));
	if (!cand_ids)
		return ERR_PTR(-ENOMEM);

	n = btf__get_nr_types(targ_btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(targ_btf, i);
		targ_name = btf__name_by_offset(targ_btf, t->name_off);
		if (str_is_empty(targ_name))
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != local_essent_len)
			continue;

		if (strncmp(local_name, targ_name, local_essent_len) == 0) {
			pr_debug("[%d] %s: found candidate [%d] %s\n",
				 local_type_id, local_name, i, targ_name);
			/* grow the candidate array by one element, not one byte */
			new_ids = reallocarray(cand_ids->data,
					       cand_ids->len + 1,
					       sizeof(*cand_ids->data));
			if (!new_ids) {
				err = -ENOMEM;
				goto err_out;
			}
			cand_ids->data = new_ids;
			cand_ids->data[cand_ids->len++] = i;
		}
	}
	return cand_ids;
err_out:
	bpf_core_free_cands(cand_ids);
	return ERR_PTR(err);
}

/* Check two types for compatibility, skipping const/volatile/restrict and
 * typedefs, to ensure we are relocating compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of the enums should be anonymous; sizes are ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both w/ same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
			btf_kind(local_type), local_id, targ_id);
		return 0;
	}
}

/*
 * Given single high-level named field accessor in local type, find
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
 * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If field with
 * desired name is found, check compatibility between local and target types,
 * before returning result.
 *
 * 1 is returned, if field is found.
 * 0 is returned if no compatible field is found.
 * <0 is returned on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf__type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}

/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as array element
			 * type (because it's the original struct), for others
			 * we should find array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				if (local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}

static int bpf_core_calc_field_relo(const struct bpf_program *prog,
				    const struct bpf_field_reloc *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, bool *validate)
{
	const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
	const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
	__u32 byte_off, byte_sz, bit_off, bit_sz;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				bpf_program__title(prog, false),
				relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					bpf_program__title(prog, false),
					relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, m->type);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_FIELD_BYTE_OFFSET:
		*val = byte_off;
		break;
	case BPF_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_FIELD_SIGNED:
		/* enums will be assumed unsigned */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_FIELD_LSHIFT_U64:
#if __BYTE_ORDER == __LITTLE_ENDIAN
		*val = 64 - (bit_off + bit_sz - byte_off  * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_FIELD_EXISTS:
	default:
		pr_warn("prog '%s': unknown relo %d at insn #%d\n",
			bpf_program__title(prog, false),
			relo->kind, relo->insn_off / 8);
		return -EINVAL;
	}

	return 0;
}
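
/* Worked example (illustrative): a 3-bit bitfield at bit offset 36 whose
 * underlying int is 4 bytes wide:
 *   byte_off = 36 / 8 / 4 * 4 = 4, and 36 + 3 - 4 * 8 = 7 <= 32, so a
 *   4-byte load at byte offset 4 covers the field;
 *   on little-endian, BPF_FIELD_LSHIFT_U64 = 64 - 7 = 57 and
 *   BPF_FIELD_RSHIFT_U64 = 64 - 3 = 61: load 4 bytes, shift left by 57,
 *   then shift right by 61 to extract the field value.
 */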

/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
 * For field existence relocation target spec will be NULL if field is not
 * found.
 * Expected insn->imm value is determined using relocation kind and local
 * spec, and is checked before patching instruction. If actual insn->imm value
 * is wrong, bail out with error.
 *
 * Currently two kinds of BPF instructions are supported:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 */
static int bpf_core_reloc_insn(struct bpf_program *prog,
			       const struct bpf_field_reloc *relo,
			       const struct bpf_core_spec *local_spec,
			       const struct bpf_core_spec *targ_spec)
{
	bool failed = false, validate = true;
	__u32 orig_val, new_val;
	struct bpf_insn *insn;
	int insn_idx, err;
	__u8 class;

	if (relo->insn_off % sizeof(struct bpf_insn))
		return -EINVAL;
	insn_idx = relo->insn_off / sizeof(struct bpf_insn);

	if (relo->kind == BPF_FIELD_EXISTS) {
		orig_val = 1; /* can't generate EXISTS relo w/o local field */
		new_val = targ_spec ? 1 : 0;
	} else if (!targ_spec) {
		failed = true;
		new_val = (__u32)-1;
	} else {
		err = bpf_core_calc_field_relo(prog, relo, local_spec,
					       &orig_val, &validate);
		if (err)
			return err;
		err = bpf_core_calc_field_relo(prog, relo, targ_spec,
					       &new_val, NULL);
		if (err)
			return err;
	}

	insn = &prog->insns[insn_idx];
	class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (!failed && validate && insn->imm != orig_val) {
			pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
				bpf_program__title(prog, false), insn_idx,
				insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
			 bpf_program__title(prog, false), insn_idx,
			 failed ? " w/ failed reloc" : "", orig_val, new_val);
	} else {
		pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
			bpf_program__title(prog, false),
			insn_idx, insn->code, insn->src_reg, insn->dst_reg,
			insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}

static struct btf *btf_load_raw(const char *path)
{
	struct btf *btf;
	size_t read_cnt;
	struct stat st;
	void *data;
	FILE *f;

	if (stat(path, &st))
		return ERR_PTR(-errno);

	data = malloc(st.st_size);
	if (!data)
		return ERR_PTR(-ENOMEM);

	f = fopen(path, "rb");
	if (!f) {
		btf = ERR_PTR(-errno);
		goto cleanup;
	}

	read_cnt = fread(data, 1, st.st_size, f);
	fclose(f);
	if (read_cnt < st.st_size) {
		btf = ERR_PTR(-EBADF);
		goto cleanup;
	}

	btf = btf__new(data, read_cnt);

cleanup:
	free(data);
	return btf;
}

/*
 * Probe few well-known locations for vmlinux kernel image and try to load BTF
 * data out of it to use for target BTF.
 */
static struct btf *bpf_core_find_kernel_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf_load_raw(path);
		else
			btf = btf__parse_elf(path, NULL);

		pr_debug("loading kernel BTF '%s': %ld\n",
			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
		if (IS_ERR(btf))
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return ERR_PTR(-ESRCH);
}

/* Output spec definition in the format:
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 */
static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const char *s;
	__u32 type_id;
	int i;

	type_id = spec->spec[0].type_id;
	t = btf__type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);
	libbpf_print(level, "[%u] %s + ", type_id, s);

	for (i = 0; i < spec->raw_len; i++)
		libbpf_print(level, "%d%s", spec->raw_spec[i],
			     i == spec->raw_len - 1 ? " => " : ":");

	libbpf_print(level, "%u.%u @ &x",
		     spec->bit_offset / 8, spec->bit_offset % 8);

	for (i = 0; i < spec->len; i++) {
		if (spec->spec[i].name)
			libbpf_print(level, ".%s", spec->spec[i].name);
		else
			libbpf_print(level, "[%u]", spec->spec[i].idx);
	}

}

static size_t bpf_core_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

static void *u32_as_hash_key(__u32 x)
{
	return (void *)(uintptr_t)x;
}

/*
 * CO-RE relocate single instruction.
 *
 * The outline and important points of the algorithm:
 * 1. For given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, to allow to
 *    specify/support incompatible variations of the same kernel struct, which
 *    might differ between different kernel versions and/or build
 *    configurations.
 *
 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 *    converter, when deduplicated BTF of a kernel still contains more than
 *    one distinct type with the same name. In that case, ___2, ___3, etc.
 *    are appended starting from the second name conflict. But struct flavors
 *    are also useful when defined "locally", in a BPF program, to extract
 *    the same data from incompatible changes between different kernel
 *    versions/configurations. For instance, to handle field renames between
 *    kernel versions, one can use two flavors of the struct name with the
 *    same common name and use conditional relocations to extract that field,
 *    depending on target kernel version.
 * 2. For each candidate type, try to match local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match,
 *    as well as all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    imperfection of BTF deduplication, which can cause slight duplication of
 *    the same BTF type, if some directly or indirectly referenced (by
 *    pointer) type gets resolved to different actual types in different
 *    object files. If such situation occurs, deduplicated BTF will end up
 *    with two (or more) structurally identical types, which differ only in
 *    types they refer to through pointer. This should be OK in most cases and
 *    is not an error.
 * 4. Candidate types search is performed by linearly scanning through all
 *    types in target BTF. It is anticipated that this is overall more
 *    efficient memory-wise and not significantly worse (if not better)
 *    CPU-wise compared to prebuilding a map from all local type names to
 *    a list of candidate type names. It's also sped up by caching resolved
 *    list of matching candidates per each local "root" type ID, that has at
 *    least one bpf_field_reloc associated with it. This list is shared
 *    between multiple relocations for the same type ID and is updated as some
 *    of the candidates are pruned due to structural incompatibility.
 */
static int bpf_core_reloc_field(struct bpf_program *prog,
				 const struct bpf_field_reloc *relo,
				 int relo_idx,
				 const struct btf *local_btf,
				 const struct btf *targ_btf,
				 struct hashmap *cand_cache)
{
	const char *prog_name = bpf_program__title(prog, false);
	struct bpf_core_spec local_spec, cand_spec, targ_spec;
	const void *type_key = u32_as_hash_key(relo->type_id);
	const struct btf_type *local_type, *cand_type;
	const char *local_name, *cand_name;
	struct ids_vec *cand_ids;
	__u32 local_id, cand_id;
	const char *spec_str;
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type)
		return -EINVAL;

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(local_name))
		return -EINVAL;

	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
	if (str_is_empty(spec_str))
		return -EINVAL;

	err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
	if (err) {
		pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, local_name, spec_str,
			err);
		return -EINVAL;
	}

	pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
		 relo->kind);
	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
	libbpf_print(LIBBPF_DEBUG, "\n");

	if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
		cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
		if (IS_ERR(cand_ids)) {
			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
				prog_name, relo_idx, local_id, local_name,
				PTR_ERR(cand_ids));
			return PTR_ERR(cand_ids);
		}
		err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
		if (err) {
			bpf_core_free_cands(cand_ids);
			return err;
		}
	}

	for (i = 0, j = 0; i < cand_ids->len; i++) {
		cand_id = cand_ids->data[i];
		cand_type = btf__type_by_id(targ_btf, cand_id);
		cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);

		err = bpf_core_spec_match(&local_spec, targ_btf,
					  cand_id, &cand_spec);
		pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
			 prog_name, relo_idx, i, cand_name);
		bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
		libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
		if (err < 0) {
			pr_warn("prog '%s': relo #%d: matching error: %d\n",
				prog_name, relo_idx, err);
			return err;
		}
		if (err == 0)
			continue;

		if (j == 0) {
			targ_spec = cand_spec;
		} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
			/* if there are many candidates, they should all
			 * resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec.bit_offset,
				targ_spec.bit_offset);
			return -EINVAL;
		}

		cand_ids->data[j++] = cand_spec.spec[0].type_id;
	}

	/*
	 * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
	 * requested, it's expected that we might not find any candidates.
	 * In this case, if field wasn't found in any candidate, the list of
	 * candidates shouldn't change at all, we'll just handle relocating
	 * appropriately, depending on relo's kind.
	 */
	if (j > 0)
		cand_ids->len = j;

	if (j == 0 && !prog->obj->relaxed_core_relocs &&
	    relo->kind != BPF_FIELD_EXISTS) {
		pr_warn("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
			prog_name, relo_idx, local_id, local_name, spec_str);
		return -ESRCH;
	}

	/* bpf_core_reloc_insn should know how to handle missing targ_spec */
	err = bpf_core_reloc_insn(prog, relo, &local_spec,
				  j ? &targ_spec : NULL);
	if (err) {
		pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
			prog_name, relo_idx, relo->insn_off, err);
		return -EINVAL;
	}

	return 0;
}

static int
bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_field_reloc *rec;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct bpf_program *prog;
	struct btf *targ_btf;
	const char *sec_name;
	int i, err = 0;

	if (targ_btf_path)
		targ_btf = btf__parse_elf(targ_btf_path, NULL);
	else
		targ_btf = bpf_core_find_kernel_btf();
	if (IS_ERR(targ_btf)) {
		pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
		return PTR_ERR(targ_btf);
	}

	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &obj->btf_ext->field_reloc_info;
	for_each_btf_ext_sec(seg, sec) {
		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
		if (str_is_empty(sec_name)) {
			err = -EINVAL;
			goto out;
		}
		prog = bpf_object__find_program_by_title(obj, sec_name);
		if (!prog) {
			pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
				sec_name);
			err = -EINVAL;
			goto out;
		}

		pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
			 sec_name, sec->num_info);

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = bpf_core_reloc_field(prog, rec, i, obj->btf,
						   targ_btf, cand_cache);
			if (err) {
				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
					sec_name, i, err);
				goto out;
			}
		}
	}

out:
	btf__free(targ_btf);
	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}
	return err;
}

static int
bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
	int err = 0;

	if (obj->btf_ext->field_reloc_info.len)
		err = bpf_core_reloc_fields(obj, targ_btf_path);

	return err;
}
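
/* Illustrative sketch (BPF-side; assumes the bpf_core_read.h helpers are
 * available): the field relocations processed above are emitted by clang
 * for accesses like:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	pid_t pid;
 *
 *	BPF_CORE_READ_INTO(&pid, task, pid);
 *
 * The recorded offset of task->pid is rewritten here to match the BTF of
 * the running kernel.
 */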

static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	if (prog->idx == obj->efile.text_shndx) {
		pr_warn("relo in .text insn %d into off %d (insn #%d)\n",
			relo->insn_idx, relo->sym_off, relo->sym_off / 8);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warn("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warn("oom in prog realloc\n");
			return -ENOMEM;
		}
		prog->insns = new_insn;

		if (obj->btf_ext) {
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
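
/* Worked example (illustrative, assuming a compiler-emitted placeholder
 * imm of -1): suppose .text was appended at instruction index
 * main_prog_cnt = 100, the callee starts at sym_off = 64 bytes (insn #8)
 * within .text, and the call sits at insn_idx = 20. Then imm becomes
 * -1 + 8 + 100 - 20 = 87, and the BPF call target resolves to
 * insn 20 + 87 + 1 = 108 = 100 + 8, i.e. the relocated callee.
 */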

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		struct reloc_desc *relo = &prog->reloc_desc[i];

		if (relo->type == RELO_LD64 || relo->type == RELO_DATA) {
			struct bpf_insn *insn = &prog->insns[relo->insn_idx];

			if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
				pr_warn("relocation out of range: '%s'\n",
					prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}

			if (relo->type != RELO_DATA) {
				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
			} else {
				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
				insn[1].imm = insn[0].imm + relo->sym_off;
			}
			insn[0].imm = obj->maps[relo->map_idx].fd;
		} else if (relo->type == RELO_CALL) {
			err = bpf_program__reloc_text(prog, obj, relo);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}

static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	if (obj->btf_ext) {
		err = bpf_object__relocate_core(obj, targ_btf_path);
		if (err) {
			pr_warn("failed to perform CO-RE relocations: %d\n",
				err);
			return err;
		}
	}
	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warn("failed to relocate '%s'\n", prog->section_name);
W
Wang Nan 已提交
3677 3678 3679 3680 3681 3682
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warn("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
		Elf_Data *data = obj->efile.reloc_sects[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warn("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warn("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, shdr, data, obj);
		if (err)
			return err;
	}
	return 0;
}

static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	int log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf;
	int btf_fd, ret;

	if (!insns || !insns_cnt)
		return -EINVAL;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	if (prog->type == BPF_PROG_TYPE_TRACING) {
		load_attr.attach_prog_fd = prog->attach_prog_fd;
		load_attr.attach_btf_id = prog->attach_btf_id;
	} else {
		load_attr.kern_version = kern_version;
		load_attr.prog_ifindex = prog->prog_ifindex;
	}
	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
	if (prog->obj->btf_ext)
		btf_fd = bpf_object__btf_fd(prog->obj);
	else
		btf_fd = -1;
	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	load_attr.log_level = prog->log_level;
	load_attr.prog_flags = prog->prog_flags;

retry_load:
	log_buf = malloc(log_buf_size);
	if (!log_buf)
		pr_warn("failed to allocate log buffer for BPF loader, continuing without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);

	if (ret >= 0) {
		if (load_attr.log_level)
			pr_debug("verifier log:\n%s", log_buf);
		*pfd = ret;
		ret = 0;
		goto out;
	}

	if (errno == ENOSPC) {
		log_buf_size <<= 1;
		free(log_buf);
		goto retry_load;
	}
	ret = -errno;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warn("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warn("-- BEGIN DUMP LOG --\n");
		pr_warn("\n%s\n", log_buf);
		pr_warn("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warn("Program too large (%zu insns), at most %d insns\n",
			load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
		/* Wrong program type? */
		int fd;

		load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
		load_attr.expected_attach_type = 0;
		fd = bpf_load_program_xattr(&load_attr, NULL, 0);
		if (fd >= 0) {
			close(fd);
			ret = -LIBBPF_ERRNO__PROGTYPE;
			goto out;
		}
	}

out:
	free(log_buf);
	return ret;
}

int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warn("Internal error: can't load program '%s'\n",
				prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warn("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
				prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);
		if (err) {
			pr_warn("Loading the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warn("failed to load program '%s'\n", prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
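
/* A minimal usage sketch (illustrative, not part of the library; most
 * callers load all programs at once via bpf_object__load() instead):
 *
 *	char license[] = "GPL";
 *	int err, fd;
 *
 *	err = bpf_program__load(prog, license, 0);
 *	if (err)
 *		return err;
 *	fd = bpf_program__fd(prog);
 */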

static bool bpf_program__is_function_storage(const struct bpf_program *prog,
					     const struct bpf_object *obj)
{
	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}

static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (bpf_program__is_function_storage(&obj->programs[i], obj))
			continue;
		obj->programs[i].log_level |= log_level;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static int libbpf_find_attach_btf_id(const char *name,
				     enum bpf_attach_type attach_type,
				     __u32 attach_prog_fd);
static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
		   struct bpf_object_open_opts *opts)
{
	const char *pin_root_path;
	struct bpf_program *prog;
	struct bpf_object *obj;
	const char *obj_name;
	char tmp_name[64];
	bool relaxed_maps;
	__u32 attach_prog_fd;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n",
			path ? : "(mem buf)");
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	if (!OPTS_VALID(opts, bpf_object_open_opts))
		return ERR_PTR(-EINVAL);

	obj_name = OPTS_GET(opts, object_name, NULL);
	if (obj_buf) {
		if (!obj_name) {
			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
				 (unsigned long)obj_buf,
				 (unsigned long)obj_buf_sz);
			obj_name = tmp_name;
		}
		path = obj_name;
		pr_debug("loading object '%s' from buffer\n", obj_name);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
	if (IS_ERR(obj))
		return obj;

	obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
	relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
		  err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	bpf_object__elf_finish(obj);

	bpf_object__for_each_program(prog, obj) {
		enum bpf_prog_type prog_type;
		enum bpf_attach_type attach_type;

		err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
					       &attach_type);
		if (err == -ESRCH)
			/* couldn't guess, but user might manually specify */
			continue;
		if (err)
			goto out;

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog, attach_type);
		if (prog_type == BPF_PROG_TYPE_TRACING) {
			err = libbpf_find_attach_btf_id(prog->section_name,
							attach_type,
							attach_prog_fd);
			if (err <= 0)
				goto out;
			prog->attach_btf_id = err;
			prog->attach_prog_fd = attach_prog_fd;
		}
	}

	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

static struct bpf_object *
__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.relaxed_maps = flags & MAPS_RELAX_COMPAT,
	);

	/* param validation */
	if (!attr->file)
		return NULL;

	pr_debug("loading %s\n", attr->file);
	return __bpf_object__open(attr->file, NULL, 0, &opts);
}

struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}

struct bpf_object *bpf_object__open(const char *path)
{
	struct bpf_object_open_attr attr = {
		.file		= path,
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	};

	return bpf_object__open_xattr(&attr);
}

struct bpf_object *
bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
{
	if (!path)
		return ERR_PTR(-EINVAL);

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0, opts);
}
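
/* A usage sketch (illustrative; "prog.o" and the option values are
 * assumptions, not requirements):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.o", &opts);
 *	if (libbpf_get_error(obj))
 *		return -1;
 */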

struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     struct bpf_object_open_opts *opts)
{
	if (!obj_buf || obj_buf_sz == 0)
		return ERR_PTR(-EINVAL);

	return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
}
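
/* A usage sketch for objects embedded in the application binary
 * (illustrative; obj_data/obj_data_len are assumed build-generated
 * symbols):
 *
 *	extern const char obj_data[];
 *	extern const size_t obj_data_len;
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_mem(obj_data, obj_data_len, NULL);
 *	if (libbpf_get_error(obj))
 *		return -1;
 */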

struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
			const char *name)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = name,
		/* wrong default, but backwards-compatible */
		.relaxed_maps = true,
	);

	/* returning NULL is wrong, but backwards-compatible */
	if (!obj_buf || obj_buf_sz == 0)
		return NULL;

	return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
}

int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
	struct bpf_object *obj;
	int err, i;

	if (!attr)
		return -EINVAL;
	obj = attr->obj;
	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warn("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
	CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);

	return 0;
out:
	/* unpin any maps that were auto-pinned during load */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object__unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return err;
}

int bpf_object__load(struct bpf_object *obj)
{
	struct bpf_object_load_attr attr = {
		.obj = obj,
	};

	return bpf_object__load_xattr(&attr);
}
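
/* A minimal end-to-end sketch (illustrative; "prog.o" is an assumption):
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.o");
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;
 *	}
 */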

static int make_parent_dir(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	char *dname, *dir;
	int err = 0;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (mkdir(dir, 0700) && errno != EEXIST)
		err = -errno;

	free(dname);
	if (err) {
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to mkdir %s: %s\n", path, cp);
	}
	return err;
}

static int check_path(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		/* capture errno before pr_warn() can clobber it */
		err = -errno;
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to statfs %s: %s\n", dir, cp);
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warn("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		/* capture errno before pr_warn() can clobber it */
		err = -errno;
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to pin program: %s\n", cp);
		return err;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}

int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
				int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	err = unlink(path);
	if (err != 0)
		return -errno;
	pr_debug("unpinned program '%s'\n", path);

	return 0;
}

int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}

int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to unpin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when unpinning single instance */
		return bpf_program__unpin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin_instance(prog, buf, i);
		if (err)
			return err;
	}

	err = rmdir(path);
	if (err)
		return -errno;

	return 0;
}

int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		} else if (map->pinned) {
			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
				 bpf_map__name(map), map->pin_path);
			return 0;
		}
	} else {
		if (!path) {
			pr_warn("missing a path to pin map '%s' at\n",
				bpf_map__name(map));
			return -EINVAL;
		} else if (map->pinned) {
			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
			return -EEXIST;
		}

		map->pin_path = strdup(path);
		if (!map->pin_path) {
			err = -errno;
			goto out_err;
		}
	}

	err = make_parent_dir(map->pin_path);
	if (err)
		return err;

	err = check_path(map->pin_path);
	if (err)
		return err;

	if (bpf_obj_pin(map->fd, map->pin_path)) {
		err = -errno;
		goto out_err;
	}

	map->pinned = true;
	pr_debug("pinned map '%s'\n", map->pin_path);

	return 0;

out_err:
	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
	pr_warn("failed to pin map: %s\n", cp);
	return err;
}
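
/* A usage sketch (illustrative; "/sys/fs/bpf/my_map" is an assumption):
 * a pin path can be recorded up front with bpf_map__set_pin_path() and
 * reused later by passing NULL here:
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	if (err)
 *		return err;
 *	err = bpf_map__pin(map, NULL);
 */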

int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		}
		path = map->pin_path;
	} else if (!path) {
		pr_warn("no path to unpin map '%s' from\n",
			bpf_map__name(map));
		return -EINVAL;
	}

	err = check_path(path);
	if (err)
		return err;

	err = unlink(path);
	if (err != 0)
		return -errno;

	map->pinned = false;
	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);

	return 0;
}

int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
{
	char *new = NULL;

	if (path) {
		new = strdup(path);
		if (!new)
			return -errno;
	}

	free(map->pin_path);
	map->pin_path = new;
	return 0;
}

const char *bpf_map__get_pin_path(const struct bpf_map *map)
{
	return map->pin_path;
}

bool bpf_map__is_pinned(const struct bpf_map *map)
{
	return map->pinned;
}

int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0) {
				err = -EINVAL;
				goto err_unpin_maps;
			} else if (len >= PATH_MAX) {
				err = -ENAMETOOLONG;
				goto err_unpin_maps;
			}
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__pin(map, pin_path);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	while ((map = bpf_map__prev(map, obj))) {
		if (!map->pin_path)
			continue;

		bpf_map__unpin(map, NULL);
	}

	return err;
}

int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0)
				return -EINVAL;
			else if (len >= PATH_MAX)
				return -ENAMETOOLONG;
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__unpin(map, pin_path);
		if (err)
			return err;
	}

	return 0;
}

int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}

int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}

int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}
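
/* A usage sketch (illustrative; assumes a loaded object and a mounted
 * BPF FS): pin all maps and programs of an object under one directory:
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
 *	if (err)
 *		return err;
 */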

void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		zfree(&obj->maps[i].pin_path);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}

	zfree(&obj->sections.rodata);
	zfree(&obj->sections.data);
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *bpf_object__name(const struct bpf_object *obj)
{
	return obj ? obj->name : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(const struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct btf *bpf_object__btf(const struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}

int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}

void *bpf_object__priv(const struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}

static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
		    bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warn("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__iter(prog, obj, true);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

struct bpf_program *
bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
	struct bpf_program *prog = next;

	do {
		prog = __bpf_program__iter(prog, obj, false);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}
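
/* A usage sketch: these iterators back the bpf_object__for_each_program()
 * macro, which is the usual way to walk an object's programs:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("%s\n", bpf_program__title(prog, false));
 */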

int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(const struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warn("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(const struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

size_t bpf_program__size(const struct bpf_program *prog)
{
	return prog->insns_cnt * sizeof(struct bpf_insn);
}

int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warn("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warn("failed to allocate memory for instance fds\n");
		return -ENOMEM;
	}

	/* fill all fd with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}
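
/* A usage sketch of the multi-instance API (illustrative; prep_fn and
 * the instance count are assumptions). The callback may substitute
 * instructions per instance through bpf_prog_prep_result:
 *
 *	static int prep_fn(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;
 *		return 0;
 *	}
 *
 *	err = bpf_program__set_prep(prog, 2, prep_fn);
 */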

int bpf_program__nth_fd(const struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
			n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warn("%dth instance of program '%s' is invalid\n",
			n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
{
	return prog->type;
}

void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

static bool bpf_program__is_type(const struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

#define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
int bpf_program__set_##NAME(struct bpf_program *prog)		\
{								\
	if (!prog)						\
		return -EINVAL;					\
	bpf_program__set_type(prog, TYPE);			\
	return 0;						\
}								\
								\
bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
{								\
	return bpf_program__is_type(prog, TYPE);		\
}								\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);

enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog)
{
	return prog->expected_attach_type;
}

void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}

#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)

/* Programs that use BTF to identify attach point */
#define BPF_PROG_BTF(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_attachable;
	bool is_attach_btf;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("uprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("uretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("tp/",			BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("raw_tp/",			BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_BTF("tp_btf/",			BPF_PROG_TYPE_TRACING,
						BPF_TRACE_RAW_TP),
	BPF_PROG_BTF("fentry/",			BPF_PROG_TYPE_TRACING,
						BPF_TRACE_FENTRY),
	BPF_PROG_BTF("fexit/",			BPF_PROG_TYPE_TRACING,
						BPF_TRACE_FEXIT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_RECVMSG),
	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_RECVMSG),
	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
						BPF_CGROUP_SYSCTL),
	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
						BPF_CGROUP_GETSOCKOPT),
	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
						BPF_CGROUP_SETSOCKOPT),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT

#define MAX_TYPE_NAME_SIZE 32

static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Forge string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (attach_type && !section_names[i].is_attachable)
			continue;

		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_names[i].sec);
	}

	return buf;
}

int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	pr_warn("failed to guess program type from ELF section '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_info("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -ESRCH;
}
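
/* A usage sketch (illustrative): map an ELF section name to program and
 * attach types, e.g. for an "xdp" section:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	err = libbpf_prog_type_by_name("xdp", &prog_type, &attach_type);
 *
 * On success, prog_type is BPF_PROG_TYPE_XDP and attach_type is 0.
 */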

#define BTF_PREFIX "btf_trace_"
int libbpf_find_vmlinux_btf_id(const char *name,
			       enum bpf_attach_type attach_type)
{
	struct btf *btf = bpf_core_find_kernel_btf();
	char raw_tp_btf[128] = BTF_PREFIX;
	char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
	const char *btf_name;
	int err = -EINVAL;
	__u32 kind;

	if (IS_ERR(btf)) {
		pr_warn("vmlinux BTF is not found\n");
		return -EINVAL;
	}

	if (attach_type == BPF_TRACE_RAW_TP) {
		/* prepend "btf_trace_" prefix per kernel convention */
		strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
		btf_name = raw_tp_btf;
		kind = BTF_KIND_TYPEDEF;
	} else {
		btf_name = name;
		kind = BTF_KIND_FUNC;
	}
	err = btf__find_by_name_kind(btf, btf_name, kind);
	btf__free(btf);
	return err;
}

static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	int err = -EINVAL;

	info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_warn("failed get_prog_info_linear for FD %d\n",
			attach_prog_fd);
		return -EINVAL;
	}
	info = &info_linear->info;
	if (!info->btf_id) {
		pr_warn("The target program doesn't have BTF\n");
		goto out;
	}
	if (btf__get_from_id(info->btf_id, &btf)) {
		pr_warn("Failed to get BTF of the program\n");
		goto out;
	}
	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
	btf__free(btf);
	if (err <= 0) {
		pr_warn("%s is not found in prog's BTF\n", name);
		goto out;
	}
out:
	free(info_linear);
	return err;
}

static int libbpf_find_attach_btf_id(const char *name,
				     enum bpf_attach_type attach_type,
				     __u32 attach_prog_fd)
{
	int i, err;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (!section_names[i].is_attach_btf)
			continue;
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (attach_prog_fd)
			err = libbpf_find_prog_btf_id(name + section_names[i].len,
						      attach_prog_fd);
		else
			err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
							 attach_type);
		if (err <= 0)
			pr_warn("%s is not found in vmlinux BTF\n", name);
		return err;
	}
	pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
	return -ESRCH;
}

int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (!section_names[i].is_attachable)
			return -EINVAL;
		*attach_type = section_names[i].attach_type;
		return 0;
	}
	pr_warn("failed to guess attach type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(true);
	if (type_names != NULL) {
		pr_info("attachable section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

int bpf_map__fd(const struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(const struct bpf_map *map)
{
	return map ? map->name : NULL;
}

__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
		     bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(const struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

bool bpf_map__is_internal(const struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}

int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warn("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warn("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}
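
/* A usage sketch (illustrative; the inner map parameters are
 * assumptions): before load, give a map-in-map a prototype inner map:
 *
 *	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
 *				      sizeof(int), sizeof(int), 1, 0);
 *
 *	err = bpf_map__set_inner_map_fd(outer_map, inner_fd);
 */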

static struct bpf_map *
__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
	ssize_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if ((m < s) || (m >= e)) {
		pr_warn("error in %s: map handler doesn't belong to object\n",
			 __func__);
		return NULL;
	}

	idx = (m - obj->maps) + i;
	if (idx >= obj->nr_maps || idx < 0)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
	if (prev == NULL)
		return obj->maps;

	return __bpf_map__iter(prev, obj, 1);
}

struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
	if (next == NULL) {
		if (!obj->nr_maps)
			return NULL;
		return obj->maps + obj->nr_maps - 1;
	}

	return __bpf_map__iter(next, obj, -1);
}

struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_object__for_each_map(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}

int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}

struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	return ERR_PTR(-ENOTSUP);
}

long libbpf_get_error(const void *ptr)
{
	return PTR_ERR_OR_ZERO(ptr);
}

int bpf_prog_load(const char *file, enum bpf_prog_type type,
		  struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}

int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {};
	struct bpf_program *prog, *first_prog = NULL;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	open_attr.file = attr->file;
	open_attr.prog_type = attr->prog_type;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		enum bpf_attach_type attach_type = attr->expected_attach_type;
		/*
		 * to preserve backwards compatibility, bpf_prog_load treats
		 * attr->prog_type, if specified, as an override to whatever
		 * bpf_object__open guessed
		 */
		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
			bpf_program__set_type(prog, attr->prog_type);
			bpf_program__set_expected_attach_type(prog,
							      attach_type);
		}
		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/*
			 * we haven't guessed from section name and user
			 * didn't provide a fallback type, too bad...
			 */
			bpf_object__close(obj);
			return -EINVAL;
		}

		prog->prog_ifindex = attr->ifindex;
		prog->log_level = attr->log_level;
		prog->prog_flags = attr->prog_flags;
		if (!first_prog)
			first_prog = prog;
	}

	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warn("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
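
/* A usage sketch (illustrative; "prog.o" is an assumption): open, load
 * and fetch the first program's fd in one call:
 *
 *	struct bpf_object *obj;
 *	int prog_fd, err;
 *
 *	err = bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 *	if (err)
 *		return err;
 */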

struct bpf_link {
	int (*destroy)(struct bpf_link *link);
};

int bpf_link__destroy(struct bpf_link *link)
{
	int err;

	if (!link)
		return 0;

	err = link->destroy(link);
	free(link);

	return err;
}

struct bpf_link_fd {
	struct bpf_link link; /* has to be at the top of struct */
	int fd; /* hook FD */
};

static int bpf_link__destroy_perf_event(struct bpf_link *link)
{
	struct bpf_link_fd *l = (void *)link;
	int err;

	err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
	if (err)
		err = -errno;

	close(l->fd);
	return err;
}

struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
						int pfd)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, err;

	if (pfd < 0) {
		pr_warn("program '%s': invalid perf event FD %d\n",
			bpf_program__title(prog, false), pfd);
		return ERR_PTR(-EINVAL);
	}
	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_perf_event;
	link->fd = pfd;

	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		err = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to pfd %d: %s\n",
			bpf_program__title(prog, false), pfd,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return ERR_PTR(err);
	}
	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		free(link);
		pr_warn("program '%s': failed to enable pfd %d: %s\n",
			bpf_program__title(prog, false), pfd,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return ERR_PTR(err);
	}
	return (struct bpf_link *)link;
}

/*
 * this function is expected to parse integer in the range of [0, 2^31-1] from
 * given file using scanf format string fmt. If actual parsed value is
 * negative, the result might be indistinguishable from error
 */
static int parse_uint_from_file(const char *file, const char *fmt)
{
	char buf[STRERR_BUFSIZE];
	int err, ret;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		err = -errno;
		pr_debug("failed to open '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		return err;
	}
	err = fscanf(f, fmt, &ret);
	if (err != 1) {
		err = err == EOF ? -EIO : -errno;
		pr_debug("failed to parse '%s': %s\n", file,
			libbpf_strerror_r(err, buf, sizeof(buf)));
		fclose(f);
		return err;
	}
	fclose(f);
	return ret;
}

static int determine_kprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_uprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_kprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int determine_uprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
				 uint64_t offset, int pid)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int type, pfd, err;

	type = uprobe ? determine_uprobe_perf_type()
		      : determine_kprobe_perf_type();
	if (type < 0) {
		pr_warn("failed to determine %s perf type: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
		return type;
	}
	if (retprobe) {
		int bit = uprobe ? determine_uprobe_retprobe_bit()
				 : determine_kprobe_retprobe_bit();

		if (bit < 0) {
			pr_warn("failed to determine %s retprobe bit: %s\n",
				uprobe ? "uprobe" : "kprobe",
				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
			return bit;
		}
		attr.config |= 1 << bit;
	}
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
	attr.config2 = offset;		 /* kprobe_addr or probe_offset */

	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("%s perf_event_open() failed: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
					    bool retprobe,
					    const char *func_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
				    0 /* offset */, -1 /* pid */);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
			bpf_program__title(prog, false),
			retprobe ? "kretprobe" : "kprobe", func_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to %s '%s': %s\n",
			bpf_program__title(prog, false),
			retprobe ? "kretprobe" : "kprobe", func_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
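
/*
 * Usage sketch (illustrative; assumes prog is a loaded kprobe program
 * from a bpf_object and "do_sys_open" is the kernel function to trace;
 * pass retprobe = true to fire on function return instead):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 */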

struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
				    binary_path, func_offset, pid);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			bpf_program__title(prog, false),
			retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
			bpf_program__title(prog, false),
			retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
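
/*
 * Usage sketch (illustrative; the binary path and offset are made-up
 * values, as in practice func_offset is resolved from the target binary's
 * symbol table; pid -1 means "all processes", retprobe = true attaches a
 * uretprobe):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, true, -1,
 *					  "/bin/bash", 0x731c0);
 *	if (libbpf_get_error(link))
 *		return -1;
 */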

static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file),
		       "/sys/kernel/debug/tracing/events/%s/%s/id",
		       tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
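
/*
 * Usage sketch (illustrative): tp_category/tp_name correspond to a
 * directory under /sys/kernel/debug/tracing/events/, for example
 * syscalls/sys_enter_openat:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 *	if (libbpf_get_error(link))
 *		return -1;
 */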

static int bpf_link__destroy_fd(struct bpf_link *link)
{
	struct bpf_link_fd *l = (void *)link;

	return close(l->fd);
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
			bpf_program__title(prog, false), tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}
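
/*
 * Usage sketch (illustrative): raw tracepoints take just the tracepoint
 * name, since attachment goes straight through bpf_raw_tracepoint_open()
 * rather than the perf event layer:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 *	if (libbpf_get_error(link))
 *		return -1;
 */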

struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_fd;

	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to trace: %s\n",
			bpf_program__title(prog, false),
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}
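
/*
 * Illustrative callback sketch for bpf_perf_event_read_simple(); the names
 * are hypothetical. *copy_mem/*copy_size act as a caller-owned scratch
 * buffer, grown on demand for records that wrap around the end of the
 * ring, so first-time callers typically pass the address of a NULL pointer
 * and of a zero size:
 *
 *	static enum bpf_perf_event_ret
 *	handle_event(struct perf_event_header *hdr, void *private_data)
 *	{
 *		if (hdr->type == PERF_RECORD_SAMPLE)
 *			...;
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	enum bpf_perf_event_ret ret;
 *	void *copy_mem = NULL;
 *	size_t copy_size = 0;
 *
 *	ret = bpf_perf_event_read_simple(base, mmap_size, page_size,
 *					 &copy_mem, &copy_size,
 *					 handle_event, NULL);
 */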

struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt;
	int epoll_fd; /* epoll instance FD */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (!pb)
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = { 0, };

	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}
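
/*
 * Usage sketch (illustrative; map_fd and handle_sample are assumptions,
 * the latter matching the perf_buffer_sample_fn signature):
 *
 *	struct perf_buffer_opts pb_opts = {};
 *	struct perf_buffer *pb;
 *
 *	pb_opts.sample_cb = handle_sample;
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts);	// 8 pages per CPU
 *	if (libbpf_get_error(pb))
 *		return -1;
 */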

struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	struct bpf_map_info map = {};
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	__u32 map_info_len;
	int err, i;

	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
		return ERR_PTR(err);
	}

	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			map.name);
		return ERR_PTR(-EINVAL);
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[i] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[i].events = EPOLLIN;
		pb->events[i].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[i]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
	}

	return pb;

error:
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[0];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}
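
/*
 * Typical consumption loop (sketch): a negative return indicates an
 * error, a non-negative one is the number of CPU buffers that had data
 * ready:
 *
 *	while (!exiting) {
 *		err = perf_buffer__poll(pb, 100);	// 100ms timeout
 *		if (err < 0 && err != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */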

struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc  = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
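
/*
 * Usage sketch (illustrative; prog_fd is an assumption). The returned
 * blob holds bpf_prog_info plus all requested arrays in a single
 * allocation, so one free() releases everything:
 *
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(
 *		prog_fd, 1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (libbpf_get_error(info))
 *		return -1;
 *	// info->info.nr_map_ids map IDs start at info->info.map_ids
 *	free(info);
 */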

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	int len = 0, n = 0, il = 0, ir = 0;
	unsigned int start = 0, end = 0;
	int tmp_cpus = 0;
	static int cpus;
	char buf[128];
	int error = 0;
	int fd = -1;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		error = errno;
		pr_warn("Failed to open file %s: %s\n", fcpu, strerror(error));
		return -error;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		error = len ? errno : EINVAL;
		pr_warn("Failed to read # of possible cpus from %s: %s\n",
			fcpu, strerror(error));
		return -error;
	}
	if (len == sizeof(buf)) {
		pr_warn("File %s size overflow\n", fcpu);
		return -EOVERFLOW;
	}
	buf[len] = '\0';

	for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
		/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
		if (buf[ir] == ',' || buf[ir] == '\0') {
			buf[ir] = '\0';
			n = sscanf(&buf[il], "%u-%u", &start, &end);
			if (n <= 0) {
				pr_warn("Failed to get # CPUs from %s\n",
					&buf[il]);
				return -EINVAL;
			} else if (n == 1) {
				end = start;
			}
			tmp_cpus += end - start + 1;
			il = ir + 1;
		}
	}
	if (tmp_cpus <= 0) {
		pr_warn("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
		return -EINVAL;
	}

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
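
/*
 * Example (sketch): the typical use is sizing per-CPU value buffers for
 * percpu maps; the result is cached after the first successful call:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;	// -errno-style error
 *	values = calloc(ncpus, value_size);
 */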