/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
};

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic_t refcnt ____cacheline_aligned;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
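
/* A minimal sketch (not part of this header) of how the helpers above are
 * meant to cooperate when an element that embeds a bpf_spin_lock is built
 * from a user-supplied value; 'val' and 'uvalue' are hypothetical names:
 *
 *	copy_map_value(map, val, uvalue);
 *	check_and_init_map_lock(map, val);
 *
 * copy_map_value() skips the lock bytes and check_and_init_map_lock()
 * zeroes them, so the lock word is never copied from userspace and always
 * starts out unlocked.
 */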

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};
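
/* For illustration (this mirrors helper definitions that live in
 * kernel/bpf/helpers.c, not in this header), a helper advertises itself
 * to the verifier like:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */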

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
				struct bpf_prog **fentry_progs, int fentry_cnt,
				struct bpf_prog **fexit_progs, int fexit_cnt,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
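
/* Conceptually (a sketch, not the actual emitted code), a trampoline built
 * with BPF_TRAMP_F_CALL_ORIG brackets each attached program with the two
 * functions above:
 *
 *	start = __bpf_prog_enter();
 *	fentry_prog(args);
 *	__bpf_prog_exit(fentry_prog, start);
 *	orig_call(args);
 *	start = __bpf_prog_enter();
 *	fexit_prog(args);
 *	__bpf_prog_exit(fexit_prog, start);
 */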

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MAX
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
	} func;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
};
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#endif

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program which FD is stored
	 * in the map to make sure that all callers and callees have the same
	 * prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
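
/* For example (illustrative), a map_alloc_check implementation would
 * typically reject contradictory flags up front:
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return -EINVAL;
 */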

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_read_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
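
/* An illustrative sketch of the alloc/populate/run pattern described
 * above; 'parent' and 'my_ctx' are hypothetical names:
 *
 *	struct bpf_prog_array *array, *old;
 *
 *	array = bpf_prog_array_alloc(1, GFP_KERNEL);
 *	array->items[0].prog = prog;	(the final item stays NULL)
 *	old = xchg(&parent->array, array);
 *	...
 *	ret = BPF_PROG_RUN_ARRAY(parent->array, my_ctx, BPF_PROG_RUN);
 */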

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
						   bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
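
/* For instance (illustrative), the percpu map copy paths use it to
 * snapshot one per-CPU slot at a time:
 *
 *	bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 */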

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							  int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device  *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device  *__dev_map_hash_lookup_elem(struct bpf_map *map,
							     u32 key)
{
	return NULL;
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_NOP_TO_CALL,
	BPF_MOD_CALL_TO_CALL,
	BPF_MOD_CALL_TO_NOP,
};
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

#endif /* _LINUX_BPF_H */