/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map are used during verification.
	 * When inserting an inner map at runtime, map_meta_equal has to
	 * ensure the map being inserted has the same properties that the
	 * verifier used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
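
/* Usage sketch (illustrative, abridged; assumed to mirror the hash map in
 * kernel/bpf/hashtab.c): a map implementation wires these callbacks up in
 * a const ops table:
 *
 *	const struct bpf_map_ops htab_map_ops = {
 *		.map_meta_equal = bpf_map_meta_equal,
 *		.map_alloc_check = htab_map_alloc_check,
 *		.map_alloc = htab_map_alloc,
 *		.map_free = htab_map_free,
 *		.map_get_next_key = htab_map_get_next_key,
 *		.map_lookup_elem = htab_map_lookup_elem,
 *		.map_update_elem = htab_map_update_elem,
 *		.map_delete_elem = htab_map_delete_elem,
 *	};
 */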

enum {
	/* Support at most 8 pointers in a BPF map value */
	BPF_MAP_VALUE_OFF_MAX = 8,
	BPF_MAP_OFF_ARR_MAX   = BPF_MAP_VALUE_OFF_MAX +
				1 + /* for bpf_spin_lock */
				1,  /* for bpf_timer */
};

enum bpf_kptr_type {
	BPF_KPTR_UNREF,
	BPF_KPTR_REF,
};

struct bpf_map_value_off_desc {
	u32 offset;
	enum bpf_kptr_type type;
	struct {
		struct btf *btf;
		struct module *module;
		btf_dtor_kfunc_t dtor;
		u32 btf_id;
	} kptr;
};

struct bpf_map_value_off {
	u32 nr_off;
	struct bpf_map_value_off_desc off[];
};

struct bpf_map_off_arr {
	u32 cnt;
	u32 field_off[BPF_MAP_OFF_ARR_MAX];
	u8 field_sz[BPF_MAP_OFF_ARR_MAX];
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	struct bpf_map_value_off *kptr_off_tab;
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct bpf_map_off_arr *off_arr;
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline bool map_value_has_kptrs(const struct bpf_map *map)
{
	return !IS_ERR_OR_NULL(map->kptr_off_tab);
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
	if (unlikely(map_value_has_kptrs(map))) {
		struct bpf_map_value_off *tab = map->kptr_off_tab;
		int i;

		for (i = 0; i < tab->nr_off; i++)
			*(u64 *)(dst + tab->off[i].offset) = 0;
	}
}

/* copy everything but bpf_spin_lock, bpf_timer and kptrs. There can be one
 * lock and one timer, and up to BPF_MAP_VALUE_OFF_MAX kptrs.
 */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 curr_off = 0;
	int i;

	if (likely(!map->off_arr)) {
		memcpy(dst, src, map->value_size);
		return;
	}

	for (i = 0; i < map->off_arr->cnt; i++) {
		u32 next_off = map->off_arr->field_off[i];

		memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
		/* advance past the copied gap and the special field itself;
		 * the original 'curr_off += field_sz[i]' failed to account
		 * for the bytes just copied.
		 */
		curr_off = next_off + map->off_arr->field_sz[i];
	}
	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
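
/* Worked example (illustrative): for a 24-byte value with a 4-byte
 * bpf_spin_lock at offset 8, off_arr holds cnt = 1, field_off = {8},
 * field_sz = {4}; copy_map_value() then copies bytes [0, 8) and
 * [12, 24), leaving the lock bytes untouched.
 */
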
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory region may be read-only. We classify types into two categories:
 * base types and extended types. Extended types are base types combined with
 * a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM was "allocated" from a different helper, and cannot be mixed
	 * with regular non-MEM_ALLOC'ed MEM types.
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptrs loaded from a map value with a
	 * load instruction, so that they can only be dereferenced but not
	 * escape the BPF program into the kernel (i.e. cannot be passed as
	 * arguments to kfuncs or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	/* MEM does not need to be initialized before the helper is called. */
	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
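
/* Illustrative sketch: decomposing an extended type. The helper macros that
 * do this (e.g. base_type()/type_flag()) are assumed to live in the verifier
 * headers; the encoding itself is simply:
 *
 *	u32 t    = ARG_PTR_TO_MAP_VALUE_OR_NULL;
 *	u32 base = t & (BPF_BASE_TYPE_LIMIT - 1);	== ARG_PTR_TO_MAP_VALUE
 *	u32 flag = t & ~(BPF_BASE_TYPE_LIMIT - 1);	== PTR_MAYBE_NULL
 */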

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
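
/* Usage sketch (illustrative, modeled on kernel/bpf/helpers.c): a helper
 * advertises its signature to the verifier through such a prototype:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */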

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id, enum bpf_type_flag *flag);
};
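
/* Usage sketch (illustrative, abridged; assumed to mirror the TC hooks in
 * net/core/filter.c): each program type supplies such an ops table:
 *
 *	const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
 *		.get_func_proto		= tc_cls_act_func_proto,
 *		.is_valid_access	= tc_cls_act_is_valid_access,
 *		.convert_ctx_access	= tc_cls_act_convert_ctx_access,
 *		.gen_prologue		= tc_cls_act_prologue,
 *		.gen_ld_abs		= bpf_gen_ld_abs,
 *	};
 */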

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_LINKS 38

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
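
/* Illustrative sketch (assumed call shape, cf. kernel/bpf/trampoline.c):
 * for case 2 above, an arch backend would be invoked roughly as
 *
 *	flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *	err = arch_prepare_bpf_trampoline(im, im->image,
 *					  im->image + PAGE_SIZE,
 *					  &tr->func.model, flags, tlinks,
 *					  tr->func.addr);
 */
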
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
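
/* Usage sketch (illustrative): the XDP fast path declares a single
 * dispatcher, e.g. DEFINE_BPF_DISPATCHER(xdp) in net/core/filter.c;
 * callers then run programs through it roughly as
 *
 *	ret = __bpf_prog_run(prog, xdp_buff, BPF_DISPATCHER_FUNC(xdp));
 *
 * and switch the attached program with
 * bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), from, to).
 */
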
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool use_bpf_prog_pack;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
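
/* Usage sketch (illustrative, assumed to mirror net/ipv4/bpf_tcp_ca.c):
 * a subsystem exposes a kernel struct to BPF by registering such an
 * instance, e.g. for tcp_congestion_ops:
 *
 *	struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.init		= bpf_tcp_ca_init,
 *		.check_member	= bpf_tcp_ca_check_member,
 *		.init_member	= bpf_tcp_ca_init_member,
 *		.reg		= bpf_tcp_ca_reg,
 *		.unreg		= bpf_tcp_ca_unreg,
 *		.name		= "tcp_congestion_ops",
 *	};
 */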

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

static inline bool map_type_contains_progs(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* to avoid allocating empty bpf_prog_array for cgroups that
 * don't have bpf program attached, use one global 'bpf_empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0); that pointer should be
 * 'freed' by bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
	int retval;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

struct bpf_tramp_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
	struct bpf_run_ctx *saved_run_ctx;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE			(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN						(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
bpf_prog_run_array(const struct bpf_prog_array *array,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");

	if (unlikely(!array))
		return ret;

	migrate_disable();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	migrate_enable();
	return ret;
}

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
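
/* Usage sketch (illustrative, roughly as done in kernel/bpf/syscall.c when
 * updating a map from the syscall path):
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */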

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
void bpf_map_free_kptr_off_tab(struct bpf_map *map);
struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

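/* The verifier relaxations below are gated on CAP_PERFMON: privileged
 * callers may leak pointers, read uninitialized stack slots, and skip
 * the Spectre v1/v4 mitigations.
 */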
static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
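/* Typical bpf_link setup, sketched: bpf_link_init() initializes the link,
 * bpf_link_prime() reserves a file descriptor and ID for it, and once the
 * attach succeeds bpf_link_settle() publishes the fd to userspace; on
 * failure bpf_link_cleanup() tears the primed link down instead.
 */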

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }
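/* Illustrative use (from the task iterator, not this header):
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * emits a dummy bpf_iter_task() definition whose BTF signature describes
 * the context arguments that "iter/task" programs receive.
 */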

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
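/* A minimal registration sketch (field values are illustrative; compare
 * the real task_reg_info in kernel/bpf/task_iter.c):
 *
 *	static struct bpf_iter_reg task_reg_info = {
 *		.target			= "task",
 *		.feature		= BPF_ITER_RESCHED,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__task, task),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &task_seq_info,
 *	};
 *
 * which is then passed to bpf_iter_reg_target() at init time.
 */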

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};
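/* BPF-side sketch of a map-element iterator consuming this context
 * (key/value are NULL in the stop phase, hence the checks):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		if (!ctx->key || !ctx->value)
 *			return 0;
 *		...
 *		return 0;
 *	}
 */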

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of 8, and is forced to use 'long' read/writes to try to
 * atomically copy long counters. Best-effort only. No barriers here,
 * since it _will_ race with concurrent updates from BPF programs.
 * Called from the bpf syscall and mostly used with size 8 or 16 bytes,
 * so ask the compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
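/* Illustrative call (roughly how the percpu hash syscall path copies one
 * per-CPU slot into the user-visible value buffer):
 *
 *	bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 */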

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_frame;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb);

/* Return the map's NUMA node if userspace specified one via BPF_F_NUMA_NODE */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
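/* The two inline helpers below implement the common context-access checks
 * for tracing programs: the offset must fall inside the u64 argument
 * array, the access must be a read, and it must be aligned to its size;
 * BTF-aware callers then defer to btf_ctx_access() for type information.
 */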

static inline bool bpf_tracing_ctx_access(int off, int size,
					  enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static inline bool bpf_tracing_btf_ctx_access(int off, int size,
					      enum bpf_access_type type,
					      const struct bpf_prog *prog,
					      struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_ctx_access(off, size, type))
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id, enum bpf_type_flag *flag);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id,
			  bool strict);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
				struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
			      const struct btf *btf, u32 func_id,
			      struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn);
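/* Kernel-side CO-RE: bpf_core_apply() applies one relocation record to
 * the instruction at @insn, resolving types against @ctx->btf and
 * reporting errors through @ctx->log.
 */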
struct bpf_core_ctx {
	struct bpf_verifier_log *log;
	const struct btf *btf;
};

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn);

static inline bool unprivileged_ebpf_enabled(void)
{
	return !sysctl_unprivileged_bpf_disabled;
}

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline void __dev_flush(void)
{
}

struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_frame *xdpf,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
					   struct sk_buff *skb)
{
	return -EOPNOTSUPP;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}

static inline bool unprivileged_ebpf_enabled(void)
{
	return false;
}

#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr);

void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
					  union bpf_attr __user *uattr)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_kptr_xchg_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};
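/* bpf_arch_text_poke() below atomically patches one kernel text site,
 * e.g. when (un)installing trampolines: BPF_MOD_CALL rewrites a call
 * instruction's target, BPF_MOD_JUMP a jump's.
 */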

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS		12

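/* bpf_bprintf_prepare() validates a printf-like format string and
 * materializes up to MAX_BPRINTF_VARARGS arguments into a per-CPU
 * buffer; every successful prepare must be paired with
 * bpf_bprintf_cleanup() to release that buffer.
 */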
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

#endif /* _LINUX_BPF_H */