// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}
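
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller can install its own printer via libbpf_set_print() and restore
 * the previous one later. my_print() below is a hypothetical callback.
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(old_fn);	/\* restore previous printer *\/
 */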

#define STRERR_BUFSIZE  128

#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while (0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
};

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	char *pin_path;
	bool pinned;
	bool reused;
};

struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;
	bool relaxed_core_relocs;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * Every loaded bpf_object is linked into this list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warn("corrupted section '%s', size: %zu\n",
			section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warn("failed to alloc name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warn("failed to alloc insns for prog under section %s\n",
			section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs is still
		 * valid, so there is no need for special treatment in
		 * bpf_close_object().
		 */
		pr_warn("failed to alloc a new program under section '%s'\n",
			section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warn("failed to get sym name string for prog %s\n",
					prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warn("failed to find sym for prog %s\n",
				prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warn("failed to allocate memory for prog sym %s\n",
				name);
			return -ENOMEM;
		}
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before ELF processing
	 * finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so they won't be closed as valid fds on
	 * failure (fd 0 is stdin; zclose() won't close a negative fd).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, Elf_Data *data, void **data_buff)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}
	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warn("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (!obj->caps.global_data)
		return 0;
	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data,
						    &obj->sections.data);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata,
						    &obj->sections.rodata);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    obj->efile.bss, NULL);
		if (err)
			return err;
	}
	return 0;
}
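
/*
 * Note (editorial sketch, not in the original source): the internal maps
 * created by bpf_object__init_global_data_maps() above back global
 * variables in BPF C code. Typically an initialized global lands in
 * .data, a zero-initialized one in .bss, and a const one in .rodata;
 * each section is wrapped in a single-entry ARRAY map whose value spans
 * the whole section.
 */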

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d\n",
			obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("unable to determine map definition size "
			"section %s, %d maps in %zd bytes\n",
			obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section.  */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" "
						"has unrecognized, non-zero "
						"options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}
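
/*
 * Illustration (an assumption for exposition, not part of the original
 * source): given BTF for
 *
 *	typedef const struct foo foo_t;
 *
 * the type chain is TYPEDEF -> CONST -> STRUCT, so
 * skip_mods_and_typedefs(btf, <id of foo_t>, &res_id) returns the
 * btf_type of `struct foo` and sets res_id to its type ID.
 */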

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
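/*
 * For example (a sketch of BPF program source, not part of this file;
 * "my_map" and its sizes are hypothetical), a BTF-defined map whose
 * members are parsed with get_map_field_int() might be declared as:
 *
 *	struct {
 *		int (*type)[BPF_MAP_TYPE_ARRAY];
 *		int (*max_entries)[64];
 *		__u32 *key;
 *		long *value;
 *	} my_map SEC(".maps");
 *
 * "type" and "max_entries" are integer attributes read below; "key" and
 * "value" instead carry full key/value type information and are handled
 * separately in bpf_object__init_user_btf_map().
 */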
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
			map_name, name, btf_kind(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
			map_name, name, btf_kind(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}

static int build_map_pin_path(struct bpf_map *map, const char *path)
{
	char buf[PATH_MAX];
	int err, len;

	if (!path)
		path = "/sys/fs/bpf";

	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	err = bpf_map__set_pin_path(map, buf);
	if (err)
		return err;

	return 0;
}
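
/*
 * Example (illustrative): for a map named "my_map" and a NULL path,
 * build_map_pin_path() above sets the pin path to "/sys/fs/bpf/my_map";
 * a non-NULL pin_root_path replaces the "/sys/fs/bpf" prefix.
 */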

static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict,
					 const char *pin_root_path)
{
	const struct btf_type *var, *def, *t;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const struct btf_member *m;
	const char *map_name;
	struct bpf_map *map;
	int vlen, i;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	map_name = btf__name_by_offset(obj->btf, var->name_off);
	vlen = btf_vlen(var);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warn("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warn("map '%s': unexpected var kind %u.\n",
			map_name, btf_kind(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warn("map '%s': unsupported var linkage %u.\n",
			map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warn("map '%s': unexpected def kind %u.\n",
			map_name, btf_kind(def));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warn("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map_name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map_name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map_name, map->def.map_flags);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map_name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %u.\n",
					map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %lld.\n",
					map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %lld.\n",
					map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map_name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %u.\n",
					map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %lld.\n",
					map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %lld.\n",
					map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;
			int err;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &val))
				return -EINVAL;
			pr_debug("map '%s': found pinning = %u.\n",
				 map_name, val);

			if (val != LIBBPF_PIN_NONE &&
			    val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map_name, val);
				return -EINVAL;
			}
			if (val == LIBBPF_PIN_BY_NAME) {
				err = build_map_pin_path(map, pin_root_path);
				if (err) {
					pr_warn("map '%s': couldn't build pin path.\n",
						map_name);
					return err;
				}
			}
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n",
					map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map_name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}

static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
					  const char *pin_root_path)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d (%s)\n",
			obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			break;
		}
	}

	if (!sec) {
		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict, pin_root_path);
		if (err)
			return err;
	}

	return 0;
}
1392 1393
static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
				 const char *pin_root_path)
{
	bool strict = !relaxed_maps;
	int err;

	err = bpf_object__init_user_maps(obj, strict);
	if (err)
		return err;

	err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
	if (err)
		return err;

	err = bpf_object__init_global_data_maps(obj);
	if (err)
		return err;

	if (obj->nr_maps) {
		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
		      compare_bpf_map);
	}
	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;

	if (!obj->btf || (has_func && has_datasec))
		return;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if (!has_datasec && btf_is_var(t)) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		}
	}
}

static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext)
		return;

	if (!obj->caps.btf_func) {
		btf_ext__free(obj->btf_ext);
		obj->btf_ext = NULL;
	}
}

static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0;
}

static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	bool btf_required = bpf_object__is_btf_mandatory(obj);
	int err = 0;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			pr_warn("Error loading ELF section %s: %ld.\n",
				BTF_ELF_SEC, PTR_ERR(obj->btf));
			goto out;
		}
		err = btf__finalize_data(obj, obj->btf);
		if (err) {
			pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
			goto out;
		}
	}
	if (btf_ext_data) {
		if (!obj->btf) {
			pr_debug("Ignoring ELF section %s because its corresponding ELF section %s was not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
				BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err || IS_ERR(obj->btf)) {
		if (btf_required)
			err = err ? : PTR_ERR(obj->btf);
		else
			err = 0;
		if (!IS_ERR_OR_NULL(obj->btf))
			btf__free(obj->btf);
		obj->btf = NULL;
	}
	if (btf_required && !obj->btf) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return err == 0 ? -ENOENT : err;
	}
	return 0;
}

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	int err = 0;

	if (!obj->btf)
		return 0;

	bpf_object__sanitize_btf(obj);
	bpf_object__sanitize_btf_ext(obj);

	err = btf__load(obj->btf);
	if (err) {
		pr_warn("Error loading %s into kernel: %d.\n",
			BTF_ELF_SEC, err);
		btf__free(obj->btf);
		obj->btf = NULL;
		/* btf_ext can't exist without btf, so free it as well */
		if (obj->btf_ext) {
			btf_ext__free(obj->btf_ext);
			obj->btf_ext = NULL;
		}

		if (bpf_object__is_btf_mandatory(obj))
			return err;
	}
	return 0;
}

static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
				   const char *pin_root_path)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warn("bpf: multiple SYMTAB in %s\n",
					obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}
			obj->efile.symbols = data;
			obj->efile.strtabidx = sh.sh_link;
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_program(obj, data->d_buf,
							      data->d_size, name, idx);
				if (err) {
					char errmsg[STRERR_BUFSIZE];
					char *cp = libbpf_strerror_r(-err, errmsg,
								     sizeof(errmsg));

					pr_warn("failed to alloc program %s (%s): %s",
						name, obj->path, cp);
					return err;
				}
			} else if (strcmp(name, ".data") == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, ".rodata") == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else {
				pr_debug("skip section(%d) %s\n", idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_reloc = obj->efile.nr_reloc;
			void *reloc = obj->efile.reloc;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = reallocarray(reloc, nr_reloc + 1,
					     sizeof(*obj->efile.reloc));
			if (!reloc) {
				pr_warn("realloc failed\n");
				return -ENOMEM;
			}

			obj->efile.reloc = reloc;
			obj->efile.nr_reloc++;

			obj->efile.reloc[nr_reloc].shdr = sh;
			obj->efile.reloc[nr_reloc].data = data;
		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
		pr_warn("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
	if (!err)
		err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
	if (!err)
		err = bpf_object__sanitize_and_load_btf(obj);
	if (!err)
		err = bpf_object__init_prog_names(obj);
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}

static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.data_shndx ||
	       shndx == obj->efile.bss_shndx ||
	       shndx == obj->efile.rodata_shndx;
}

static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.maps_shndx ||
	       shndx == obj->efile.btf_maps_shndx;
}

static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
					      int shndx)
{
	return shndx == obj->efile.text_shndx ||
	       bpf_object__shndx_is_maps(obj, shndx) ||
	       bpf_object__shndx_is_data(obj, shndx);
}

static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else
		return LIBBPF_MAP_UNSPEC;
}

static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warn("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		struct bpf_insn *insns = prog->insns;
		enum libbpf_map_type type;
		unsigned int insn_idx;
		unsigned int shdr_idx;
		const char *name;
		size_t map_idx;
		GElf_Sym sym;
		GElf_Rel rel;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warn("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn("relocation: symbol %"PRIx64" not found\n",
				GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				  sym.st_name) ? : "<?>";

		pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name, name);

		shdr_idx = sym.st_shndx;
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
			 insn_idx, shdr_idx);

		if (shdr_idx >= SHN_LORESERVE) {
			pr_warn("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
				name, shdr_idx, insn_idx,
				insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
			pr_warn("Program '%s' contains unrecognized relo data pointing to section %u\n",
				prog->section_name, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warn("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warn("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
		    bpf_object__shndx_is_data(obj, shdr_idx)) {
			type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
			if (type != LIBBPF_MAP_UNSPEC) {
				if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
					pr_warn("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
						name, insn_idx, insns[insn_idx].code);
					return -LIBBPF_ERRNO__RELOC;
				}
				if (!obj->caps.global_data) {
					pr_warn("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
						name, insn_idx);
					return -LIBBPF_ERRNO__RELOC;
				}
			}

			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
				if (maps[map_idx].libbpf_type != type)
					continue;
				if (type != LIBBPF_MAP_UNSPEC ||
				    (maps[map_idx].sec_idx == sym.st_shndx &&
				     maps[map_idx].sec_offset == sym.st_value)) {
					pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
						 map_idx, maps[map_idx].name,
						 maps[map_idx].sec_idx,
						 maps[map_idx].sec_offset,
						 insn_idx);
					break;
				}
			}

			if (map_idx >= nr_maps) {
				pr_warn("bpf relocation: map_idx %d larger than %d\n",
					(int)map_idx, (int)nr_maps - 1);
				return -LIBBPF_ERRNO__RELOC;
			}

			prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
						   RELO_DATA : RELO_LD64;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].map_idx = map_idx;
		}
	}
	return 0;
}

static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_map_def *def = &map->def;
	__u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* if it's BTF-defined map, we don't need to search for type IDs */
	if (map->sec_idx == obj->efile.btf_maps_shndx)
		return 0;

	if (!bpf_map__is_internal(map)) {
		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
					   def->value_size, &key_type_id,
					   &value_type_id);
	} else {
		/*
		 * LLVM annotates global data differently in BTF, that is,
		 * only as '.data', '.bss' or '.rodata'.
		 */
		ret = btf__find_by_name(obj->btf,
				libbpf_type_to_btf_name[map->libbpf_type]);
	}
	if (ret < 0)
		return ret;

	map->btf_key_type_id = key_type_id;
	map->btf_value_type_id = bpf_map__is_internal(map) ?
				 ret : value_type_id;
	return 0;
}

int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;
	map->reused = true;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return err;
}
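
/* Usage sketch (illustrative, not part of libbpf): reuse an already pinned
 * map's fd for an object's map before load, so two loaders share one map.
 * The map name and pin path below are hypothetical:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "my_map");
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (m && pin_fd >= 0 && !bpf_map__reuse_fd(m, pin_fd))
 *		;	// bpf_object__load() will skip creating "my_map"
 *	if (pin_fd >= 0)
 *		close(pin_fd);
 */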

int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
{
	if (!map || !max_entries)
		return -EINVAL;

	/* If map already created, its attributes can't be changed. */
	if (map->fd >= 0)
		return -EBUSY;

	map->def.max_entries = max_entries;

	return 0;
}
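
/* Usage sketch (illustrative): bump a map's size before the object is
 * loaded, e.g. to scale a table with the machine; the map name is
 * hypothetical:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "flow_table");
 *	int n = libbpf_num_possible_cpus();
 *
 *	if (m && n > 0)
 *		bpf_map__resize(m, 4 * n);
 */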

static int
bpf_object__probe_name(struct bpf_object *obj)
{
	struct bpf_load_program_attr attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure basic loading works */

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
			__func__, cp, errno);
		return -errno;
	}
	close(ret);

	/* now try the same program, but with the name */

	attr.name = "test";
	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.name = 1;
		close(ret);
	}

	return 0;
}

static int
bpf_object__probe_global_data(struct bpf_object *obj)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, errno);
		return -errno;
	}

	insns[0].imm = map;

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.global_data = 1;
		close(ret);
	}

	close(map);
	return 0;
}

static int bpf_object__probe_btf_func(struct bpf_object *obj)
{
	const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_func = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
	const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_datasec = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	int (*probe_fn[])(struct bpf_object *obj) = {
		bpf_object__probe_name,
		bpf_object__probe_global_data,
		bpf_object__probe_btf_func,
		bpf_object__probe_btf_datasec,
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
		ret = probe_fn[i](obj);
		if (ret < 0)
			pr_debug("Probe #%d failed with %d.\n", i, ret);
	}

	return 0;
}

static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
	struct bpf_map_info map_info = {};
	char msg[STRERR_BUFSIZE];
	__u32 map_info_len;

	map_info_len = sizeof(map_info);

	if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
		return false;
	}

	return (map_info.type == map->def.type &&
		map_info.key_size == map->def.key_size &&
		map_info.value_size == map->def.value_size &&
		map_info.max_entries == map->def.max_entries &&
		map_info.map_flags == map->def.map_flags);
}

static int
bpf_object__reuse_map(struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, pin_fd;

	pin_fd = bpf_obj_get(map->pin_path);
	if (pin_fd < 0) {
		err = -errno;
		if (err == -ENOENT) {
			pr_debug("found no pinned map to reuse at '%s'\n",
				 map->pin_path);
			return 0;
		}

		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("couldn't retrieve pinned map '%s': %s\n",
			map->pin_path, cp);
		return err;
	}

	if (!map_is_reuse_compat(map, pin_fd)) {
		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
			map->pin_path);
		close(pin_fd);
		return -EINVAL;
	}

	err = bpf_map__reuse_fd(map, pin_fd);
	if (err) {
		close(pin_fd);
		return err;
	}
	map->pinned = true;
	pr_debug("reused pinned map at '%s'\n", map->pin_path);

	return 0;
}
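
/* Usage sketch (illustrative): opt a map into the pin-and-reuse logic above
 * by giving it a pin path before load; the path and map name are
 * hypothetical:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "counters");
 *
 *	if (m)
 *		bpf_map__set_pin_path(m, "/sys/fs/bpf/counters");
 *
 * On load, an existing compatible pinned map is reused; otherwise the map
 * is created and auto-pinned at that path (see bpf_object__create_maps()).
 */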

static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, zero = 0;
	__u8 *data;

	/* Nothing to do here since kernel already zero-initializes .bss map. */
	if (map->libbpf_type == LIBBPF_MAP_BSS)
		return 0;

	data = map->libbpf_type == LIBBPF_MAP_DATA ?
	       obj->sections.data : obj->sections.rodata;

	err = bpf_map_update_elem(map->fd, &zero, data, 0);
	/* Freeze .rodata map as read-only from syscall side. */
	if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
		err = bpf_map_freeze(map->fd);
		if (err) {
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warn("Error freezing map(%s) as read-only: %s\n",
				map->name, cp);
			err = 0;
		}
	}
	return err;
}
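
/* BPF-side counterpart (illustrative assumption, not from this file): a
 * read-only global such as
 *
 *	const volatile __u32 debug_level = 0;
 *
 * is placed by the compiler into .rodata, materialized here as a
 * single-entry array map and, once frozen above, can no longer be
 * modified from user space via the bpf() syscall.
 */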

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	int nr_cpus = 0;
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		if (map->pin_path) {
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("error reusing pinned map %s\n",
					map->name);
				return err;
			}
		}

		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		if (obj->caps.name)
			create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
		    !def->max_entries) {
			if (!nr_cpus)
				nr_cpus = libbpf_num_possible_cpus();
			if (nr_cpus < 0) {
				pr_warn("failed to determine number of system CPUs: %d\n",
					nr_cpus);
				err = nr_cpus;
				goto err_out;
			}
			pr_debug("map '%s': setting size to %d\n",
				 map->name, nr_cpus);
			create_attr.max_entries = nr_cpus;
		} else {
			create_attr.max_entries = def->max_entries;
		}
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		if (bpf_map_type__is_map_in_map(def->type) &&
		    map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;

		if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && (create_attr.btf_key_type_id ||
				 create_attr.btf_value_type_id)) {
			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				map->name, cp, err);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = -errno;
err_out:
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to create map (name: '%s'): %s(%d)\n",
				map->name, cp, err);
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}

		if (bpf_map__is_internal(map)) {
			err = bpf_object__populate_internal_map(obj, map);
			if (err < 0) {
				zclose(*pfd);
				goto err_out;
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				pr_warn("failed to auto-pin map name '%s' at '%s'\n",
					map->name, map->pin_path);
				return err;
			}
		}

		pr_debug("created map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}

static int
check_btf_ext_reloc_err(struct bpf_program *prog, int err,
			void *btf_prog_info, const char *info_name)
{
	if (err != -ENOENT) {
		pr_warn("Error in loading %s for sec %s.\n",
			info_name, prog->section_name);
		return err;
	}

	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */

	if (btf_prog_info) {
		/*
		 * Some info has already been found, but there is a problem
		 * with the last btf_ext reloc; we must error out.
		 */
		pr_warn("Error in relocating %s for sec %s.\n",
			info_name, prog->section_name);
		return err;
	}

	/* There was a problem loading the very first info. Ignore the rest. */
	pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
		info_name, prog->section_name, info_name);
	return 0;
}

static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name,  __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	if (!insn_offset || prog->line_info) {
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	return 0;
}

#define BPF_CORE_SPEC_MAX_LEN 64

/* represents BPF CO-RE field or array element accessor */
struct bpf_core_accessor {
	__u32 type_id;		/* struct/union type or array element type */
	__u32 idx;		/* field index or array index */
	const char *name;	/* field name or NULL for array accessor */
};

struct bpf_core_spec {
	const struct btf *btf;
	/* high-level spec: named fields and array indices only */
	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
	/* high-level spec length */
	int len;
	/* raw, low-level spec: 1-to-1 with accessor spec string */
	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
	/* raw spec length */
	int raw_len;
	/* field bit offset represented by spec */
	__u32 bit_offset;
};

static bool str_is_empty(const char *s)
{
	return !s || !s[0];
}

/*
 * Turn bpf_field_reloc into a low- and high-level spec representation,
 * validating correctness along the way, as well as calculating resulting
 * field bit offset, specified by accessor string. Low-level spec captures
 * every single level of nestedness, including traversing anonymous
 * struct/union members. High-level one only captures semantically meaningful
 * "turning points": named fields and array indices.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of access string (it's
 * just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 */
static int bpf_core_spec_parse(const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	const struct btf_type *t;
	const char *name;
	__u32 id;
	__s64 sz;

	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;

	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	/* first spec value is always reloc type array index */
	t = skip_mods_and_typedefs(btf, type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	spec->spec[0].type_id = id;
	spec->spec[0].idx = access_idx;
	spec->len++;

	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				spec->spec[spec->len].type_id = id;
				spec->spec[spec->len].idx = access_idx;
				spec->spec[spec->len].name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t || access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
				type_id, spec_str, i, id, btf_kind(t));
			return -EINVAL;
		}
	}

	return 0;
}
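
/* Worked example for the comment above (a sketch, assuming 4-byte ints and
 * no padding): for 'struct sample' and access string "0:1:2:3", parsing
 * yields raw_spec = [0, 1, 2, 3] and bit_offset =
 *
 *	0 * sizeof(struct sample) * 8	// initial s[0] dereference
 *	+ 32				// anonymous struct at member idx 1
 *	+ 64				// field 'a' at idx 2 inside it
 *	+ 3 * 32			// array element a[3]
 *	= 192 bits, i.e. byte offset 24.
 */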

static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before last triple underscore. Struct name part after last triple
 * underscore is ignored by BPF CO-RE relocation during relocation matching.
 */
static size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i;

	for (i = n - 5; i >= 0; i--) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
	}
	return n;
}
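
/* Illustrative: bpf_core_essential_name_len("task_struct___v50") returns 11,
 * i.e. strlen("task_struct"), while a plain "task_struct" is returned whole.
 */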

/* dynamically sized list of type IDs */
struct ids_vec {
	__u32 *data;
	int len;
};

static void bpf_core_free_cands(struct ids_vec *cand_ids)
{
	free(cand_ids->data);
	free(cand_ids);
}

static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
					   __u32 local_type_id,
					   const struct btf *targ_btf)
{
	size_t local_essent_len, targ_essent_len;
	const char *local_name, *targ_name;
	const struct btf_type *t;
	struct ids_vec *cand_ids;
	__u32 *new_ids;
	int i, err, n;

	t = btf__type_by_id(local_btf, local_type_id);
	if (!t)
		return ERR_PTR(-EINVAL);

	local_name = btf__name_by_offset(local_btf, t->name_off);
	if (str_is_empty(local_name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(local_name);

	cand_ids = calloc(1, sizeof(*cand_ids));
	if (!cand_ids)
		return ERR_PTR(-ENOMEM);

	n = btf__get_nr_types(targ_btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(targ_btf, i);
		targ_name = btf__name_by_offset(targ_btf, t->name_off);
		if (str_is_empty(targ_name))
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != local_essent_len)
			continue;

		if (strncmp(local_name, targ_name, local_essent_len) == 0) {
			pr_debug("[%d] %s: found candidate [%d] %s\n",
				 local_type_id, local_name, i, targ_name);
			new_ids = reallocarray(cand_ids->data,
					       cand_ids->len + 1,
					       sizeof(*new_ids));
			if (!new_ids) {
				err = -ENOMEM;
				goto err_out;
			}
			cand_ids->data = new_ids;
			cand_ids->data[cand_ids->len++] = i;
		}
	}
	return cand_ids;
err_out:
	bpf_core_free_cands(cand_ids);
	return ERR_PTR(err);
}
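
/* Illustrative: for a local 'struct task_struct', both 'task_struct' and
 * 'task_struct___2' in target BTF are candidates, since the flavor suffix
 * (everything from the last '___') is ignored when comparing names.
 */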

/* Check two types for compatibility, skipping const/volatile/restrict and
 * typedefs, to ensure we are relocating compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of enums should be anonymous;
 *   - for ENUMs, check sizes, names are ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both w/ same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
			btf_kind(local_type), local_id, targ_id);
		return 0;
	}
}

/*
 * Given single high-level named field accessor in local type, find
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
 * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If field with
 * desired name is found, check compatibility between local and target types,
 * before returning result.
 *
 * 1 is returned, if field is found.
 * 0 is returned if no compatible field is found.
 * <0 is returned on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf__type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}

/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as array element
			 * type (because it's the original struct), for others
			 * we should find array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				if (local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}

static int bpf_core_calc_field_relo(const struct bpf_program *prog,
				    const struct bpf_field_reloc *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, bool *validate)
{
	const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
	const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
	__u32 byte_off, byte_sz, bit_off, bit_sz;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				bpf_program__title(prog, false),
				relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					bpf_program__title(prog, false),
					relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, m->type);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_FIELD_BYTE_OFFSET:
		*val = byte_off;
		break;
	case BPF_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_FIELD_SIGNED:
		/* enums will be assumed unsigned */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_FIELD_LSHIFT_U64:
#if __BYTE_ORDER == __LITTLE_ENDIAN
		*val = 64 - (bit_off + bit_sz - byte_off  * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_FIELD_EXISTS:
	default:
		pr_warn("prog '%s': unknown relo %d at insn #%d\n",
			bpf_program__title(prog, false),
			relo->kind, relo->insn_off / 8);
		return -EINVAL;
	}

	return 0;
}
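
/* Worked example (a sketch, little-endian assumed): for a bitfield with
 * bit_off = 36 and bit_sz = 5 inside a 4-byte int member, the code above
 * computes byte_sz = 4 and byte_off = 4 (36 + 5 - 32 bits still fit into
 * 4 bytes), so the relocated access becomes: load 4 bytes at offset 4 into
 * a u64 v, then 'v <<= 64 - (36 + 5 - 32) = 55' and 'v >>= 64 - 5 = 59'
 * to extract the 5-bit value.
 */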

/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
 * For field existence relocation target spec will be NULL if field is not
 * found.
 * Expected insn->imm value is determined using relocation kind and local
 * spec, and is checked before patching instruction. If actual insn->imm value
 * is wrong, bail out with error.
 *
 * Currently two kinds of BPF instructions are supported:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 */
static int bpf_core_reloc_insn(struct bpf_program *prog,
			       const struct bpf_field_reloc *relo,
			       const struct bpf_core_spec *local_spec,
			       const struct bpf_core_spec *targ_spec)
{
	bool failed = false, validate = true;
	__u32 orig_val, new_val;
	struct bpf_insn *insn;
	int insn_idx, err;
	__u8 class;

	if (relo->insn_off % sizeof(struct bpf_insn))
		return -EINVAL;
	insn_idx = relo->insn_off / sizeof(struct bpf_insn);

	if (relo->kind == BPF_FIELD_EXISTS) {
		orig_val = 1; /* can't generate EXISTS relo w/o local field */
		new_val = targ_spec ? 1 : 0;
	} else if (!targ_spec) {
		failed = true;
		new_val = (__u32)-1;
	} else {
		err = bpf_core_calc_field_relo(prog, relo, local_spec,
					       &orig_val, &validate);
		if (err)
			return err;
		err = bpf_core_calc_field_relo(prog, relo, targ_spec,
					       &new_val, NULL);
		if (err)
			return err;
	}

	insn = &prog->insns[insn_idx];
	class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (!failed && validate && insn->imm != orig_val) {
			pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
				bpf_program__title(prog, false), insn_idx,
				insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
			 bpf_program__title(prog, false), insn_idx,
			 failed ? " w/ failed reloc" : "", orig_val, new_val);
	} else {
		pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
			bpf_program__title(prog, false),
			insn_idx, insn->code, insn->src_reg, insn->dst_reg,
			insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}
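
/* Illustrative: for a BPF_FIELD_BYTE_OFFSET relocation where the field sits
 * at offset 24 in the program's local BTF but at offset 32 in the running
 * kernel, an instruction compiled as 'r2 = 24' (BPF_MOV64_IMM) is patched
 * in place to 'r2 = 32' by the function above.
 */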

static struct btf *btf_load_raw(const char *path)
{
	struct btf *btf;
	size_t read_cnt;
	struct stat st;
	void *data;
	FILE *f;

	if (stat(path, &st))
		return ERR_PTR(-errno);

	data = malloc(st.st_size);
	if (!data)
		return ERR_PTR(-ENOMEM);

	f = fopen(path, "rb");
	if (!f) {
		btf = ERR_PTR(-errno);
		goto cleanup;
	}

	read_cnt = fread(data, 1, st.st_size, f);
	fclose(f);
	if (read_cnt < st.st_size) {
		btf = ERR_PTR(-EBADF);
		goto cleanup;
	}

	btf = btf__new(data, read_cnt);

cleanup:
	free(data);
	return btf;
}

/*
 * Probe few well-known locations for vmlinux kernel image and try to load BTF
 * data out of it to use for target BTF.
 */
static struct btf *bpf_core_find_kernel_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf_load_raw(path);
		else
			btf = btf__parse_elf(path, NULL);

		pr_debug("loading kernel BTF '%s': %ld\n",
			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
		if (IS_ERR(btf))
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return ERR_PTR(-ESRCH);
}

/* Output spec definition in the format:
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 */
static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const char *s;
	__u32 type_id;
	int i;

	type_id = spec->spec[0].type_id;
	t = btf__type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);
	libbpf_print(level, "[%u] %s + ", type_id, s);

	for (i = 0; i < spec->raw_len; i++)
		libbpf_print(level, "%d%s", spec->raw_spec[i],
			     i == spec->raw_len - 1 ? " => " : ":");

	libbpf_print(level, "%u.%u @ &x",
		     spec->bit_offset / 8, spec->bit_offset % 8);

	for (i = 0; i < spec->len; i++) {
		if (spec->spec[i].name)
			libbpf_print(level, ".%s", spec->spec[i].name);
		else
			libbpf_print(level, "[%u]", spec->spec[i].idx);
	}

}

static size_t bpf_core_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

static void *u32_as_hash_key(__u32 x)
{
	return (void *)(uintptr_t)x;
}

/*
 * CO-RE relocate single instruction.
 *
 * The outline and important points of the algorithm:
 * 1. For given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, to allow to
 *    specify/support incompatible variations of the same kernel struct, which
 *    might differ between different kernel versions and/or build
 *    configurations.
 *
 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 *    converter, when deduplicated BTF of a kernel still contains more than
 *    one different types with the same name. In that case, ___2, ___3, etc
 *    are appended starting from second name conflict. But start flavors are
 *    also useful to be defined "locally", in BPF program, to extract same
 *    data from incompatible changes between different kernel
 *    versions/configurations. For instance, to handle field renames between
 *    kernel versions, one can use two flavors of the struct name with the
 *    same common name and use conditional relocations to extract that field,
 *    depending on target kernel version.
 * 2. For each candidate type, try to match local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match,
 *    as well as all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    imperfection of BTF deduplication, which can cause slight duplication of
 *    the same BTF type, if some directly or indirectly referenced (by
 *    pointer) type gets resolved to different actual types in different
 *    object files. If such situation occurs, deduplicated BTF will end up
 *    with two (or more) structurally identical types, which differ only in
 *    types they refer to through pointer. This should be OK in most cases and
 *    is not an error.
 * 4. Candidate types search is performed by linearly scanning through all
 *    types in target BTF. It is anticipated that this is overall more
 *    efficient memory-wise and not significantly worse (if not better)
 *    CPU-wise compared to prebuilding a map from all local type names to
 *    a list of candidate type names. It's also sped up by caching resolved
 *    list of matching candidates per each local "root" type ID, that has at
 *    least one bpf_field_reloc associated with it. This list is shared
 *    between multiple relocations for the same type ID and is updated as some
 *    of the candidates are pruned due to structural incompatibility.
 */
static int bpf_core_reloc_field(struct bpf_program *prog,
				 const struct bpf_field_reloc *relo,
				 int relo_idx,
				 const struct btf *local_btf,
				 const struct btf *targ_btf,
				 struct hashmap *cand_cache)
{
	const char *prog_name = bpf_program__title(prog, false);
	struct bpf_core_spec local_spec, cand_spec, targ_spec;
	const void *type_key = u32_as_hash_key(relo->type_id);
	const struct btf_type *local_type, *cand_type;
	const char *local_name, *cand_name;
	struct ids_vec *cand_ids;
	__u32 local_id, cand_id;
	const char *spec_str;
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type)
		return -EINVAL;

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(local_name))
		return -EINVAL;

	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
	if (str_is_empty(spec_str))
		return -EINVAL;

	err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
	if (err) {
		pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, local_name, spec_str,
			err);
		return -EINVAL;
	}

	pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
		 relo->kind);
	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
	libbpf_print(LIBBPF_DEBUG, "\n");

	if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
		cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
		if (IS_ERR(cand_ids)) {
			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
				prog_name, relo_idx, local_id, local_name,
				PTR_ERR(cand_ids));
			return PTR_ERR(cand_ids);
		}
		err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
		if (err) {
			bpf_core_free_cands(cand_ids);
			return err;
		}
	}

	for (i = 0, j = 0; i < cand_ids->len; i++) {
		cand_id = cand_ids->data[i];
		cand_type = btf__type_by_id(targ_btf, cand_id);
		cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);

		err = bpf_core_spec_match(&local_spec, targ_btf,
					  cand_id, &cand_spec);
		pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
			 prog_name, relo_idx, i, cand_name);
		bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
		libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
		if (err < 0) {
			pr_warn("prog '%s': relo #%d: matching error: %d\n",
				prog_name, relo_idx, err);
			return err;
		}
		if (err == 0)
			continue;

		if (j == 0) {
			targ_spec = cand_spec;
		} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
			/* if there are many candidates, they should all
			 * resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec.bit_offset,
				targ_spec.bit_offset);
			return -EINVAL;
		}

		cand_ids->data[j++] = cand_spec.spec[0].type_id;
	}

	/*
	 * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
	 * requested, it's expected that we might not find any candidates.
	 * In this case, if field wasn't found in any candidate, the list of
	 * candidates shouldn't change at all, we'll just handle relocating
	 * appropriately, depending on relo's kind.
	 */
	if (j > 0)
		cand_ids->len = j;

	if (j == 0 && !prog->obj->relaxed_core_relocs &&
	    relo->kind != BPF_FIELD_EXISTS) {
		pr_warn("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
			prog_name, relo_idx, local_id, local_name, spec_str);
		return -ESRCH;
	}

	/* bpf_core_reloc_insn should know how to handle missing targ_spec */
	err = bpf_core_reloc_insn(prog, relo, &local_spec,
				  j ? &targ_spec : NULL);
	if (err) {
		pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
			prog_name, relo_idx, relo->insn_off, err);
		return -EINVAL;
	}

	return 0;
}

static int
bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_field_reloc *rec;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct bpf_program *prog;
	struct btf *targ_btf;
	const char *sec_name;
	int i, err = 0;

	if (targ_btf_path)
		targ_btf = btf__parse_elf(targ_btf_path, NULL);
	else
		targ_btf = bpf_core_find_kernel_btf();
	if (IS_ERR(targ_btf)) {
		pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
		return PTR_ERR(targ_btf);
	}

	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &obj->btf_ext->field_reloc_info;
	for_each_btf_ext_sec(seg, sec) {
		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
		if (str_is_empty(sec_name)) {
			err = -EINVAL;
			goto out;
		}
		prog = bpf_object__find_program_by_title(obj, sec_name);
		if (!prog) {
			pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
				sec_name);
			err = -EINVAL;
			goto out;
		}

		pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
			 sec_name, sec->num_info);

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = bpf_core_reloc_field(prog, rec, i, obj->btf,
						   targ_btf, cand_cache);
			if (err) {
				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
					sec_name, i, err);
				goto out;
			}
		}
	}

out:
	btf__free(targ_btf);
	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}
	return err;
}

static int
bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
	int err = 0;

	if (obj->btf_ext->field_reloc_info.len)
		err = bpf_core_reloc_fields(obj, targ_btf_path);

	return err;
}
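
/* Usage sketch (illustrative): relocate against BTF from a different kernel
 * image instead of the auto-detected vmlinux BTF; the path below is
 * hypothetical:
 *
 *	struct bpf_object_load_attr attr = {
 *		.obj = obj,
 *		.target_btf_path = "/tmp/vmlinux-5.4.btf",
 *	};
 *
 *	err = bpf_object__load_xattr(&attr);
 */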

static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	if (prog->idx == obj->efile.text_shndx) {
		pr_warn("relo in .text insn %d into off %d\n",
			relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warn("no .text section found yet relo into text exists\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warn("oom in prog realloc\n");
			return -ENOMEM;
		}
		prog->insns = new_insn;

		if (obj->btf_ext) {
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64 ||
		    prog->reloc_desc[i].type == RELO_DATA) {
			bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx + 1 >= (int)prog->insns_cnt) {
				pr_warn("relocation out of range: '%s'\n",
					prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}

			if (!relo_data) {
				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			} else {
				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
				insns[insn_idx + 1].imm = insns[insn_idx].imm;
			}
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else if (prog->reloc_desc[i].type == RELO_CALL) {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}

static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	if (obj->btf_ext) {
		err = bpf_object__relocate_core(obj, targ_btf_path);
		if (err) {
			pr_warn("failed to perform CO-RE relocations: %d\n",
				err);
			return err;
		}
	}
	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warn("failed to relocate '%s'\n", prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warn("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warn("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warn("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, shdr, data, obj);
		if (err)
			return err;
	}
	return 0;
}

static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	int log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf;
	int btf_fd, ret;

	if (!insns || !insns_cnt)
		return -EINVAL;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	if (prog->type == BPF_PROG_TYPE_TRACING) {
		load_attr.attach_prog_fd = prog->attach_prog_fd;
		load_attr.attach_btf_id = prog->attach_btf_id;
	} else {
		load_attr.kern_version = kern_version;
		load_attr.prog_ifindex = prog->prog_ifindex;
	}
	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
	if (prog->obj->btf_ext)
		btf_fd = bpf_object__btf_fd(prog->obj);
	else
		btf_fd = -1;
	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	load_attr.log_level = prog->log_level;
	load_attr.prog_flags = prog->prog_flags;

3709 3710
retry_load:
	log_buf = malloc(log_buf_size);
3711
	if (!log_buf)
3712
		pr_warn("Alloc log buffer for bpf loader error, continue without log\n");
3713

3714
	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
3715 3716

	if (ret >= 0) {
3717 3718
		if (load_attr.log_level)
			pr_debug("verifier log:\n%s", log_buf);
3719 3720 3721 3722 3723
		*pfd = ret;
		ret = 0;
		goto out;
	}

3724 3725 3726 3727 3728
	if (errno == ENOSPC) {
		log_buf_size <<= 1;
		free(log_buf);
		goto retry_load;
	}
3729
	ret = -errno;
3730
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3731
	pr_warn("load bpf program failed: %s\n", cp);
3732

3733 3734
	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
3735 3736 3737
		pr_warn("-- BEGIN DUMP LOG ---\n");
		pr_warn("\n%s\n", log_buf);
		pr_warn("-- END LOG --\n");
3738
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
3739 3740
		pr_warn("Program too large (%zu insns), at most %d insns\n",
			load_attr.insns_cnt, BPF_MAXINSNS);
3741
		ret = -LIBBPF_ERRNO__PROG2BIG;
3742
	} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
3743
		/* Wrong program type? */
3744
		int fd;
3745

3746 3747 3748 3749 3750 3751 3752 3753
		load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
		load_attr.expected_attach_type = 0;
		fd = bpf_load_program_xattr(&load_attr, NULL, 0);
		if (fd >= 0) {
			close(fd);
			ret = -LIBBPF_ERRNO__PROGTYPE;
			goto out;
		}
3754 3755 3756 3757 3758 3759 3760
	}

out:
	free(log_buf);
	return ret;
}
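
/*
 * Note on the retry loop in load_program() (sizes illustrative): the
 * verifier log buffer starts at BPF_LOG_BUF_SIZE bytes and is doubled
 * on every ENOSPC until the full log fits:
 *
 *	attempt 1: log_buf_size = BPF_LOG_BUF_SIZE
 *	attempt 2: log_buf_size = BPF_LOG_BUF_SIZE << 1
 *	attempt 3: log_buf_size = BPF_LOG_BUF_SIZE << 2
 *	...
 */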

int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warn("Internal error: can't load program '%s'\n",
				prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warn("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
				prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warn("Loading the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warn("failed to load program '%s'\n", prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

static bool bpf_program__is_function_storage(const struct bpf_program *prog,
					     const struct bpf_object *obj)
{
	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}

static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (bpf_program__is_function_storage(&obj->programs[i], obj))
			continue;
		obj->programs[i].log_level |= log_level;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static int libbpf_find_attach_btf_id(const char *name,
				     enum bpf_attach_type attach_type,
				     __u32 attach_prog_fd);
static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
		   struct bpf_object_open_opts *opts)
{
	const char *pin_root_path;
	struct bpf_program *prog;
	struct bpf_object *obj;
	const char *obj_name;
	char tmp_name[64];
	bool relaxed_maps;
	__u32 attach_prog_fd;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n",
			path ? : "(mem buf)");
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	if (!OPTS_VALID(opts, bpf_object_open_opts))
		return ERR_PTR(-EINVAL);

	obj_name = OPTS_GET(opts, object_name, path);
	if (obj_buf) {
		if (!obj_name) {
			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
				 (unsigned long)obj_buf,
				 (unsigned long)obj_buf_sz);
			obj_name = tmp_name;
		}
		path = obj_name;
		pr_debug("loading object '%s' from buffer\n", obj_name);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
	if (IS_ERR(obj))
		return obj;

	obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
	relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
		  err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	bpf_object__elf_finish(obj);

	bpf_object__for_each_program(prog, obj) {
		enum bpf_prog_type prog_type;
		enum bpf_attach_type attach_type;

		err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
					       &attach_type);
		if (err == -ESRCH)
			/* couldn't guess, but user might manually specify */
			continue;
		if (err)
			goto out;

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog, attach_type);
		if (prog_type == BPF_PROG_TYPE_TRACING) {
			err = libbpf_find_attach_btf_id(prog->section_name,
							attach_type,
							attach_prog_fd);
			if (err <= 0)
				goto out;
			prog->attach_btf_id = err;
			prog->attach_prog_fd = attach_prog_fd;
		}
	}

	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

static struct bpf_object *
__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.relaxed_maps = flags & MAPS_RELAX_COMPAT,
	);

	/* param validation */
	if (!attr->file)
		return NULL;

	pr_debug("loading %s\n", attr->file);
	return __bpf_object__open(attr->file, NULL, 0, &opts);
}

struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}

struct bpf_object *bpf_object__open(const char *path)
{
	struct bpf_object_open_attr attr = {
		.file		= path,
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	};

	return bpf_object__open_xattr(&attr);
}

struct bpf_object *
bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
{
	if (!path)
		return ERR_PTR(-EINVAL);

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0, opts);
}

struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     struct bpf_object_open_opts *opts)
{
	if (!obj_buf || obj_buf_sz == 0)
		return ERR_PTR(-EINVAL);

	return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
}

struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
			const char *name)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = name,
		/* wrong default, but backwards-compatible */
		.relaxed_maps = true,
	);

	/* returning NULL is wrong, but backwards-compatible */
	if (!obj_buf || obj_buf_sz == 0)
		return NULL;

	return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
}
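
/*
 * Usage sketch (illustrative; "prog.o" and "my_obj" are placeholder
 * values): the opts-based bpf_object__open_file() is the preferred
 * entry point; the *_xattr and *_buffer variants above are kept for
 * backwards compatibility:
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *	);
 *	struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
 *	long err = libbpf_get_error(obj);
 *
 *	if (err)
 *		return err;
 */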

int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
	struct bpf_object *obj;
	int err, i;

	if (!attr)
		return -EINVAL;
	obj = attr->obj;
	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warn("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
	CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);

	return 0;
out:
	/* unpin any maps that were auto-pinned during load */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object__unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return err;
}

int bpf_object__load(struct bpf_object *obj)
{
	struct bpf_object_load_attr attr = {
		.obj = obj,
	};

	return bpf_object__load_xattr(&attr);
}
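
/*
 * Sketch of the load pipeline driven by bpf_object__load_xattr()
 * (illustrative, error handling abbreviated): maps are created first,
 * relocations are applied against the created map fds, and only then
 * are programs loaded into the kernel:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o"); // placeholder
 *
 *	if (!libbpf_get_error(obj) && !bpf_object__load(obj))
 *		; // attach programs, look up maps, etc.
 *	bpf_object__close(obj);
 */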

static int make_parent_dir(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	char *dname, *dir;
	int err = 0;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (mkdir(dir, 0700) && errno != EEXIST)
		err = -errno;

	free(dname);
	if (err) {
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to mkdir %s: %s\n", path, cp);
	}
	return err;
}

static int check_path(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("failed to statfs %s: %s\n", dir, cp);
		err = -errno;
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warn("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("failed to pin program: %s\n", cp);
		return -errno;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}

int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
				int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	err = unlink(path);
	if (err != 0)
		return -errno;
	pr_debug("unpinned program '%s'\n", path);

	return 0;
}

int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to pin\n",
			prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
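
/*
 * Pin layout produced by bpf_program__pin() (paths illustrative): a
 * single-instance program is pinned directly at 'path'; a program
 * with N > 1 instances gets one pin per instance underneath it:
 *
 *	/sys/fs/bpf/my_prog      <- single instance
 *	/sys/fs/bpf/my_prog/0    <- instance 0 of a multi-instance prog
 *	/sys/fs/bpf/my_prog/1    <- instance 1, and so on
 */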

int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to unpin\n",
			prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__unpin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin_instance(prog, buf, i);
		if (err)
			return err;
	}

	err = rmdir(path);
	if (err)
		return -errno;

	return 0;
}

int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		} else if (map->pinned) {
			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
				 bpf_map__name(map), map->pin_path);
			return 0;
		}
	} else {
		if (!path) {
			pr_warn("missing a path to pin map '%s' at\n",
				bpf_map__name(map));
			return -EINVAL;
		} else if (map->pinned) {
			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
			return -EEXIST;
		}

		map->pin_path = strdup(path);
		if (!map->pin_path) {
			err = -errno;
			goto out_err;
		}
	}

	err = make_parent_dir(map->pin_path);
	if (err)
		return err;

	err = check_path(map->pin_path);
	if (err)
		return err;

	if (bpf_obj_pin(map->fd, map->pin_path)) {
		err = -errno;
		goto out_err;
	}

	map->pinned = true;
	pr_debug("pinned map '%s'\n", map->pin_path);

	return 0;

out_err:
	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
	pr_warn("failed to pin map: %s\n", cp);
	return err;
}

int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		}
		path = map->pin_path;
	} else if (!path) {
		pr_warn("no path to unpin map '%s' from\n",
			bpf_map__name(map));
		return -EINVAL;
	}

	err = check_path(path);
	if (err)
		return err;

	err = unlink(path);
	if (err != 0)
		return -errno;

	map->pinned = false;
	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);

	return 0;
}

int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
{
	char *new = NULL;

	if (path) {
		new = strdup(path);
		if (!new)
			return -errno;
	}

	free(map->pin_path);
	map->pin_path = new;
	return 0;
}

const char *bpf_map__get_pin_path(const struct bpf_map *map)
{
	return map->pin_path;
}

bool bpf_map__is_pinned(const struct bpf_map *map)
{
	return map->pinned;
}
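
/*
 * Sketch (illustrative path): bpf_map__set_pin_path() only records the
 * path; the actual pinning (or reuse of an existing pinned map) happens
 * later, when the object is loaded:
 *
 *	bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	... bpf_object__load(obj) pins the map or reuses the pinned one ...
 *	bpf_map__is_pinned(map);    // true on success
 */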

int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0) {
				err = -EINVAL;
				goto err_unpin_maps;
			} else if (len >= PATH_MAX) {
				err = -ENAMETOOLONG;
				goto err_unpin_maps;
			}
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__pin(map, pin_path);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	while ((map = bpf_map__prev(map, obj))) {
		if (!map->pin_path)
			continue;

		bpf_map__unpin(map, NULL);
	}

	return err;
}

int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0)
				return -EINVAL;
			else if (len >= PATH_MAX)
				return -ENAMETOOLONG;
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__unpin(map, pin_path);
		if (err)
			return err;
	}

	return 0;
}

int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}

int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}

int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}
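
/*
 * Resulting bpffs layout of bpf_object__pin() (names illustrative):
 * maps and programs end up side by side under the same root, maps
 * under their map names and programs under their pin names:
 *
 *	/sys/fs/bpf/my_root/<map name>
 *	/sys/fs/bpf/my_root/<program pin name>
 */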

void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		zfree(&obj->maps[i].pin_path);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}

	zfree(&obj->sections.rodata);
	zfree(&obj->sections.data);
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *bpf_object__name(const struct bpf_object *obj)
{
	return obj ? obj->name : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(const struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct btf *bpf_object__btf(const struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}

int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}

void *bpf_object__priv(const struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}

static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
		    bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warn("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__iter(prog, obj, true);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

struct bpf_program *
bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
	struct bpf_program *prog = next;

	do {
		prog = __bpf_program__iter(prog, obj, false);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(const struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warn("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(const struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

size_t bpf_program__size(const struct bpf_program *prog)
{
	return prog->insns_cnt * sizeof(struct bpf_insn);
}

int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warn("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warn("failed to allocate memory for instance fds\n");
		return -ENOMEM;
	}

	/* fill all fds with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

int bpf_program__nth_fd(const struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
			n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warn("%dth instance of program '%s' is invalid\n",
			n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
{
	return prog->type;
}

void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

static bool bpf_program__is_type(const struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

#define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
int bpf_program__set_##NAME(struct bpf_program *prog)		\
{								\
	if (!prog)						\
		return -EINVAL;					\
	bpf_program__set_type(prog, TYPE);			\
	return 0;						\
}								\
								\
bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
{								\
	return bpf_program__is_type(prog, TYPE);		\
}								\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
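
/*
 * Expansion sketch: BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE)
 * above generates this pair of typed accessors:
 *
 *	int bpf_program__set_kprobe(struct bpf_program *prog);
 *	bool bpf_program__is_kprobe(const struct bpf_program *prog);
 */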

enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog)
{
	return prog->expected_attach_type;
}

void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}

#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)

/* Programs that use BTF to identify attach point */
#define BPF_PROG_BTF(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_attachable;
	bool is_attach_btf;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("uprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("uretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("tp/",			BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("raw_tp/",			BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_BTF("tp_btf/",			BPF_PROG_TYPE_TRACING,
						BPF_TRACE_RAW_TP),
	BPF_PROG_BTF("fentry/",			BPF_PROG_TYPE_TRACING,
						BPF_TRACE_FENTRY),
	BPF_PROG_BTF("fexit/",			BPF_PROG_TYPE_TRACING,
						BPF_TRACE_FEXIT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_RECVMSG),
	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_RECVMSG),
	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
						BPF_CGROUP_SYSCTL),
	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
						BPF_CGROUP_GETSOCKOPT),
	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
						BPF_CGROUP_SETSOCKOPT),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT

#define MAX_TYPE_NAME_SIZE 32

static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Forge string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (attach_type && !section_names[i].is_attachable)
			continue;

		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_names[i].sec);
	}

	return buf;
}

int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	pr_warn("failed to guess program type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_info("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -ESRCH;
}
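
/*
 * Example (illustrative): matching above is by prefix, so a program
 * placed in an ELF section named "kprobe/sys_nanosleep" matches the
 * "kprobe/" entry and resolves to prog_type = BPF_PROG_TYPE_KPROBE
 * with expected_attach_type = 0.
 */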

#define BTF_PREFIX "btf_trace_"
int libbpf_find_vmlinux_btf_id(const char *name,
			       enum bpf_attach_type attach_type)
{
	struct btf *btf = bpf_core_find_kernel_btf();
	char raw_tp_btf[128] = BTF_PREFIX;
	char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
	const char *btf_name;
	int err = -EINVAL;
	__u32 kind;

	if (IS_ERR(btf)) {
		pr_warn("vmlinux BTF is not found\n");
		return -EINVAL;
	}

	if (attach_type == BPF_TRACE_RAW_TP) {
		/* prepend "btf_trace_" prefix per kernel convention */
		strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
		btf_name = raw_tp_btf;
		kind = BTF_KIND_TYPEDEF;
	} else {
		btf_name = name;
		kind = BTF_KIND_FUNC;
	}
	err = btf__find_by_name_kind(btf, btf_name, kind);
	btf__free(btf);
	return err;
}

static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	int err = -EINVAL;

	info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_warn("failed get_prog_info_linear for FD %d\n",
			attach_prog_fd);
		return -EINVAL;
	}
	info = &info_linear->info;
	if (!info->btf_id) {
		pr_warn("The target program doesn't have BTF\n");
		goto out;
	}
	if (btf__get_from_id(info->btf_id, &btf)) {
		pr_warn("Failed to get BTF of the program\n");
		goto out;
	}
	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
	btf__free(btf);
	if (err <= 0) {
		pr_warn("%s is not found in prog's BTF\n", name);
		goto out;
	}
out:
	free(info_linear);
	return err;
}

static int libbpf_find_attach_btf_id(const char *name,
				     enum bpf_attach_type attach_type,
				     __u32 attach_prog_fd)
{
	int i, err;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (!section_names[i].is_attach_btf)
			continue;
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (attach_prog_fd)
			err = libbpf_find_prog_btf_id(name + section_names[i].len,
						      attach_prog_fd);
		else
			err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
							 attach_type);
		if (err <= 0)
			pr_warn("%s is not found in vmlinux BTF\n", name);
		return err;
	}
	pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
	return -ESRCH;
}

int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (!section_names[i].is_attachable)
			return -EINVAL;
		*attach_type = section_names[i].attach_type;
		return 0;
	}
	pr_warn("failed to guess attach type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(true);
	if (type_names != NULL) {
		pr_info("attachable section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

int bpf_map__fd(const struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(const struct bpf_map *map)
{
	return map ? map->name : NULL;
}

__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
		     bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(const struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

bool bpf_map__is_internal(const struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}

int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warn("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warn("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}

static struct bpf_map *
__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
	ssize_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if ((m < s) || (m >= e)) {
		pr_warn("error in %s: map handler doesn't belong to object\n",
			 __func__);
		return NULL;
	}

	idx = (m - obj->maps) + i;
	if (idx >= obj->nr_maps || idx < 0)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
	if (prev == NULL)
		return obj->maps;

	return __bpf_map__iter(prev, obj, 1);
}

struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
	if (next == NULL) {
		if (!obj->nr_maps)
			return NULL;
		return obj->maps + obj->nr_maps - 1;
	}

	return __bpf_map__iter(next, obj, -1);
}

struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_object__for_each_map(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}

int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}

struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	return ERR_PTR(-ENOTSUP);
}

long libbpf_get_error(const void *ptr)
{
	return PTR_ERR_OR_ZERO(ptr);
}
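
/*
 * Usage sketch (illustrative): pointer-returning libbpf APIs encode
 * errors via ERR_PTR(), so callers should check results with
 * libbpf_get_error() rather than comparing against NULL:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o"); // placeholder
 *	long err = libbpf_get_error(obj);
 *
 *	if (err)
 *		return err;    // obj is not a valid pointer here
 */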

int bpf_prog_load(const char *file, enum bpf_prog_type type,
		  struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}

int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {};
	struct bpf_program *prog, *first_prog = NULL;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	open_attr.file = attr->file;
	open_attr.prog_type = attr->prog_type;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		enum bpf_attach_type attach_type = attr->expected_attach_type;
		/*
		 * to preserve backwards compatibility, bpf_prog_load treats
		 * attr->prog_type, if specified, as an override to whatever
		 * bpf_object__open guessed
		 */
		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
			bpf_program__set_type(prog, attr->prog_type);
			bpf_program__set_expected_attach_type(prog,
							      attach_type);
		}
		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/*
			 * we haven't guessed from section name and user
			 * didn't provide a fallback type, too bad...
			 */
			bpf_object__close(obj);
			return -EINVAL;
		}

		prog->prog_ifindex = attr->ifindex;
		prog->log_level = attr->log_level;
		prog->prog_flags = attr->prog_flags;
		if (!first_prog)
			first_prog = prog;
	}

	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warn("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}

struct bpf_link {
	int (*destroy)(struct bpf_link *link);
};

int bpf_link__destroy(struct bpf_link *link)
{
	int err;

	if (!link)
		return 0;

	err = link->destroy(link);
	free(link);

	return err;
}
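
/*
 * Usage sketch (illustrative; "sys_nanosleep" is a placeholder): every
 * attach API below returns a struct bpf_link owning the underlying
 * hook; destroying the link detaches the program and releases the hook:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "sys_nanosleep");
 *	if (!libbpf_get_error(link)) {
 *		... program runs while the link is alive ...
 *		bpf_link__destroy(link);
 *	}
 */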

struct bpf_link_fd {
	struct bpf_link link; /* has to be at the top of struct */
	int fd; /* hook FD */
};

static int bpf_link__destroy_perf_event(struct bpf_link *link)
{
	struct bpf_link_fd *l = (void *)link;
	int err;

	err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
	if (err)
		err = -errno;

	close(l->fd);
	return err;
}

struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
						int pfd)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, err;

	if (pfd < 0) {
		pr_warn("program '%s': invalid perf event FD %d\n",
			bpf_program__title(prog, false), pfd);
		return ERR_PTR(-EINVAL);
	}
	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_perf_event;
	link->fd = pfd;

	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		err = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to pfd %d: %s\n",
			bpf_program__title(prog, false), pfd,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return ERR_PTR(err);
	}
	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		free(link);
		pr_warn("program '%s': failed to enable pfd %d: %s\n",
			bpf_program__title(prog, false), pfd,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return ERR_PTR(err);
	}
	return (struct bpf_link *)link;
}

/*
 * this function is expected to parse integer in the range of [0, 2^31-1] from
 * given file using scanf format string fmt. If actual parsed value is
 * negative, the result might be indistinguishable from error
 */
static int parse_uint_from_file(const char *file, const char *fmt)
{
	char buf[STRERR_BUFSIZE];
	int err, ret;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		err = -errno;
		pr_debug("failed to open '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		return err;
	}
	err = fscanf(f, fmt, &ret);
	if (err != 1) {
		err = err == EOF ? -EIO : -errno;
		pr_debug("failed to parse '%s': %s\n", file,
			libbpf_strerror_r(err, buf, sizeof(buf)));
		fclose(f);
		return err;
	}
	fclose(f);
	return ret;
}
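
/*
 * Example inputs for the helpers below (values illustrative; they vary
 * by kernel): "/sys/bus/event_source/devices/kprobe/type" typically
 * contains a bare integer such as "6\n", and
 * ".../kprobe/format/retprobe" contains "config:0\n";
 * parse_uint_from_file() would return 6 and 0 respectively.
 */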

static int determine_kprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_uprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_kprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int determine_uprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
				 uint64_t offset, int pid)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int type, pfd, err;

	type = uprobe ? determine_uprobe_perf_type()
		      : determine_kprobe_perf_type();
	if (type < 0) {
		pr_warn("failed to determine %s perf type: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
		return type;
	}
	if (retprobe) {
		int bit = uprobe ? determine_uprobe_retprobe_bit()
				 : determine_kprobe_retprobe_bit();

		if (bit < 0) {
			pr_warn("failed to determine %s retprobe bit: %s\n",
				uprobe ? "uprobe" : "kprobe",
				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
			return bit;
		}
		attr.config |= 1 << bit;
	}
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
	attr.config2 = offset;		 /* kprobe_addr or probe_offset */

	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("%s perf_event_open() failed: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
					    bool retprobe,
					    const char *func_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
				    0 /* offset */, -1 /* pid */);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
			bpf_program__title(prog, false),
			retprobe ? "kretprobe" : "kprobe", func_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to %s '%s': %s\n",
			bpf_program__title(prog, false),
			retprobe ? "kretprobe" : "kprobe", func_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}

struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
				    binary_path, func_offset, pid);
	if (pfd < 0) {
5645 5646 5647 5648 5649
		pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			bpf_program__title(prog, false),
			retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
			bpf_program__title(prog, false),
			retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
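
/* Usage sketch (editor's illustration): a uprobe needs the target binary path
 * and the function's file offset within it; the path and the 0x8a300 offset
 * below are hypothetical. pid -1 means "all processes"; retprobe=true
 * attaches to function return instead of entry.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, true, -1,
 *					  "/usr/lib/libc.so.6", 0x8a300);
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */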

static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file),
		       "/sys/kernel/debug/tracing/events/%s/%s/id",
		       tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
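
/* Usage sketch (editor's illustration): category and name mirror the
 * directory layout under /sys/kernel/debug/tracing/events/.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */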

static int bpf_link__destroy_fd(struct bpf_link *link)
{
	struct bpf_link_fd *l = (void *)link;

	return close(l->fd);
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
			bpf_program__title(prog, false), tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}
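
/* Usage sketch (editor's illustration): raw tracepoints take a single name
 * (no category) and bypass the perf event layer:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 */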

struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_fd;

	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to trace: %s\n",
			bpf_program__title(prog, false),
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}
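
/* Usage sketch (editor's illustration): trace programs carry their attach
 * target in BTF info recorded at load time, so no name is passed here:
 *
 *	struct bpf_link *link = bpf_program__attach_trace(prog);
 */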

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_second = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_second);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}
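
/* Usage sketch (editor's illustration): a minimal bpf_perf_event_print_t
 * callback for the helper above. copy_mem/copy_size may start as NULL/0;
 * the helper grows that buffer as needed to reassemble records that wrap
 * around the end of the ring, and the caller frees copy_mem when done.
 *
 *	static enum bpf_perf_event_ret
 *	handle_event(struct perf_event_header *hdr, void *private_data)
 *	{
 *		// inspect hdr->type and hdr->size here
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 */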

struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt;
	int epoll_fd; /* epoll FD used to poll all per-CPU bufs */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (!pb)
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = { 0, };

	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}
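
/* Usage sketch (editor's illustration): set up a perf buffer with 8 pages
 * per CPU over a BPF_MAP_TYPE_PERF_EVENT_ARRAY map; handle_sample is a
 * hypothetical perf_buffer_sample_fn.
 *
 *	struct perf_buffer_opts pb_opts = {
 *		.sample_cb = handle_sample,
 *	};
 *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *
 *	if (IS_ERR(pb))
 *		return PTR_ERR(pb);
 */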

struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	struct bpf_map_info map = {};
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	__u32 map_info_len;
	int err, i;

	if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
		return ERR_PTR(err);
	}

	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			map.name);
		return ERR_PTR(-EINVAL);
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[i] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[i].events = EPOLLIN;
		pb->events[i].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[i]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
	}

	return pb;

error:
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[0];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}
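
/* Usage sketch (editor's illustration): a typical consume loop; the 100ms
 * timeout is arbitrary and a negative return is an -errno value.
 *
 *	while (!stop) {
 *		int n = perf_buffer__poll(pb, 100);
 *
 *		if (n < 0 && n != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */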

struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc  = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
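
/* Usage sketch (editor's illustration): fetch translated instructions and
 * map IDs for a program FD in one contiguous allocation; the caller frees
 * the result.
 *
 *	__u64 arrays = (1UL << BPF_PROG_INFO_XLATED_INSNS) |
 *		       (1UL << BPF_PROG_INFO_MAP_IDS);
 *	struct bpf_prog_info_linear *il =
 *		bpf_program__get_prog_info_linear(prog_fd, arrays);
 *
 *	if (IS_ERR(il))
 *		return PTR_ERR(il);
 *	...
 *	free(il);
 */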

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}
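
/* Usage sketch (editor's illustration): the two helpers above make the blob
 * position-independent, e.g. when persisting it and reading it back:
 *
 *	bpf_program__bpil_addr_to_offs(il);
 *	... write the blob out, read it back in ...
 *	bpf_program__bpil_offs_to_addr(il);
 */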

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	int len = 0, n = 0, il = 0, ir = 0;
	unsigned int start = 0, end = 0;
	int tmp_cpus = 0;
	static int cpus;
	char buf[128];
	int error = 0;
	int fd = -1;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		error = errno;
		pr_warn("Failed to open file %s: %s\n", fcpu, strerror(error));
		return -error;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		error = len ? errno : EINVAL;
		pr_warn("Failed to read # of possible cpus from %s: %s\n",
			fcpu, strerror(error));
		return -error;
	}
	if (len == sizeof(buf)) {
		pr_warn("File %s size overflow\n", fcpu);
		return -EOVERFLOW;
	}
	buf[len] = '\0';

	for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
		/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
		if (buf[ir] == ',' || buf[ir] == '\0') {
			buf[ir] = '\0';
			n = sscanf(&buf[il], "%u-%u", &start, &end);
			if (n <= 0) {
				pr_warn("Failed to get # CPUs from %s\n",
					&buf[il]);
				return -EINVAL;
			} else if (n == 1) {
				end = start;
			}
			tmp_cpus += end - start + 1;
			il = ir + 1;
		}
	}
	if (tmp_cpus <= 0) {
		pr_warn("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
		return -EINVAL;
	}

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
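
/* Usage sketch (editor's illustration): size a buffer for per-CPU map
 * values; value_size is hypothetical.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, value_size);
 */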