/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the inserted map has the same
	 * properties that the verifier used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);
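	/* Illustrative sketch (not part of the original header): a typical
	 * map_meta_equal implementation only needs to compare the properties
	 * the verifier relied on, e.g.:
	 *
	 *	static bool my_map_meta_equal(const struct bpf_map *meta0,
	 *				      const struct bpf_map *meta1)
	 *	{
	 *		return meta0->map_type == meta1->map_type &&
	 *		       meta0->key_size == meta1->key_size &&
	 *		       meta0->value_size == meta1->value_size &&
	 *		       meta0->map_flags == meta1->map_flags;
	 *	}
	 */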

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
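
/* Illustrative usage sketch (not part of the original header): an update
 * path for a map value containing a bpf_spin_lock would typically do
 *
 *	check_and_init_map_lock(map, new_val);
 *	copy_map_value(map, new_val, user_val);
 *
 * so the embedded lock is zero-initialized once and never clobbered by a
 * plain memcpy of the surrounding value bytes.
 */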
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be
				 * initialized; the helper function must fill
				 * all bytes or clear them in the error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
void notrace __bpf_prog_enter_sleepable(void);
void notrace __bpf_prog_exit_sleepable(void);
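
/* Illustrative sketch (not part of the original header): a caller updating
 * a trampoline would derive the flags from the use cases above, roughly:
 *
 *	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
 *
 *	if (tprogs[BPF_TRAMP_FEXIT].nr_progs)
 *		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *	err = arch_prepare_bpf_trampoline(image, image_end, &tr->func.model,
 *					  flags, tprogs, tr->func.addr);
 */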

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
	struct bpf_ksym ksym;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int bpf_dispatcher_##name##_func(		\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
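/* Illustrative usage sketch (modeled on the XDP dispatcher in
 * net/core/filter.c):
 *
 *	DEFINE_BPF_DISPATCHER(xdp)
 *
 *	void bpf_prog_change_xdp(struct bpf_prog *prev_prog,
 *				 struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp),
 *					   prev_prog, prog);
 *	}
 */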
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
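
/* Illustrative sketch (not part of the original header): a verifier-side
 * access check reduces to testing the capability bits, e.g.:
 *
 *	u32 cap = bpf_map_flags_to_cap(map);
 *
 *	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE))
 *		return -EACCES;
 */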

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		migrate_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
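
/* Illustrative usage sketch (not part of the original header): syscall-side
 * map code brackets the actual operation with these helpers, e.g.:
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */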

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }
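
/* Illustrative usage sketch (modeled on the task iterator in
 * kernel/bpf/task_iter.c):
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 */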

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
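
/* Illustrative usage sketch (not part of the original header): per-cpu map
 * copy paths use it to snapshot one cpu's slot at a time, e.g.:
 *
 *	bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu),
 *			round_up(map->value_size, 8));
 */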

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
bool dev_map_can_have_prog(struct bpf_map *map);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
bool cpu_map_prog_allowed(struct bpf_map *map);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  int off, u32 id, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
1454

1455
const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
1456
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map,
								 u32 key)
{
	return NULL;
}
static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);
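/* A descriptive note: for offloaded maps the above forward the generic
 * map syscalls to the backing device rather than touching host memory
 * (see kernel/bpf/offload.c).
 */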

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
			 struct bpf_prog *old, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
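/* sock_map_unhash() and sock_map_close() are installed into a socket's
 * sk_prot callbacks while the socket is held in a sockmap/sockhash.
 */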
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog,
				       struct bpf_prog *old, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
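/* A program type's ->get_func_proto() callback returns one of these to
 * the verifier, e.g. (sketch of the switch in bpf_base_func_proto()):
 *
 *	case BPF_FUNC_map_lookup_elem:
 *		return &bpf_map_lookup_elem_proto;
 *	case BPF_FUNC_map_update_elem:
 *		return &bpf_map_update_elem_proto;
 */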
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
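/* bpf_user_rnd_u32() backs BPF_FUNC_get_prandom_u32 for both converted
 * classic BPF and native eBPF programs; bpf_user_rnd_init_once() seeds
 * its per-cpu state on first use.
 */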

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
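/* Kernel-side context for BPF_PROG_TYPE_SK_REUSEPORT programs; the
 * verifier rewrites accesses to the UAPI struct sk_reuseport_md into
 * loads from this structure.
 */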
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
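/* bpf_arch_text_poke() live-patches the call or jump at @ip from @addr1
 * to @addr2; a NULL address means "no instruction", so it can install or
 * remove a call/jump. BPF trampolines use this to attach at fentry/fexit
 * sites.
 */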

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#endif /* _LINUX_BPF_H */