/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>

struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
};
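
/* Illustrative sketch: a map type implements a subset of these callbacks and
 * exports a const ops table; roughly what kernel/bpf/arraymap.c does for
 * BPF_MAP_TYPE_ARRAY:
 *
 *	const struct bpf_map_ops array_map_ops = {
 *		.map_alloc_check	= array_map_alloc_check,
 *		.map_alloc		= array_map_alloc,
 *		.map_free		= array_map_free,
 *		.map_get_next_key	= array_map_get_next_key,
 *		.map_lookup_elem	= array_map_lookup_elem,
 *		.map_update_elem	= array_map_update_elem,
 *		.map_delete_elem	= array_map_delete_elem,
 *	};
 */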

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	u32 pages;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	struct user_struct *user ____cacheline_aligned;
	atomic_t refcnt;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
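
/* Illustrative sketch (assumed caller, for exposition only): an update path
 * zeroes the lock word of a freshly allocated value, then copies the
 * user-supplied bytes around it so the lock is never overwritten:
 *
 *	check_and_init_map_lock(map, new_val);
 *	copy_map_value(map, new_val, user_val);
 */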
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
	enum bpf_arg_type arg3_type;
	enum bpf_arg_type arg4_type;
	enum bpf_arg_type arg5_type;
};
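
/* Illustrative sketch: helpers advertise themselves to the verifier with one
 * of these protos; roughly what kernel/bpf/helpers.c does for
 * bpf_map_lookup_elem():
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */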

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}
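
/* Illustrative sketch (hypothetical callback body): an ->is_valid_access()
 * implementation records the true field width so the verifier can permit
 * narrower loads of that field:
 *
 *	case offsetof(struct __sk_buff, mark):
 *		bpf_ctx_record_field_size(info, sizeof(__u32));
 *		break;
 */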

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};
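
/* Illustrative sketch: each program type supplies one of these tables;
 * roughly what net/core/filter.c registers for socket filters:
 *
 *	const struct bpf_verifier_ops sk_filter_verifier_ops = {
 *		.get_func_proto		= sk_filter_func_proto,
 *		.is_valid_access	= sk_filter_is_valid_access,
 *		.convert_ctx_access	= bpf_convert_ctx_access,
 *		.gen_ld_abs		= bpf_gen_ld_abs,
 *	};
 */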

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
};

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program which FD is stored
	 * in the map to make sure that all callers and callees have the same
	 * prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
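
/* Illustrative sketch: map creation rejects the contradictory flag
 * combination up front, e.g. from a ->map_alloc_check() callback:
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return -EINVAL;
 */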

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);
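
/* Illustrative sketch, simplified from the perf event attach path: the
 * active array is rebuilt with bpf_prog_array_copy() and swapped as a
 * whole, leaving RCU readers to finish on the old copy:
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(event->tp_event->prog_array, new_array);
 *	bpf_prog_array_free(old_array);
 */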

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	 })

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
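
/* Illustrative call site (sketch; real users are e.g. the cgroup hooks in
 * kernel/bpf/cgroup.c): every attached program runs and the results are
 * AND-ed, so any program returning 0 vetoes the operation:
 *
 *	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, BPF_PROG_RUN);
 *	return ret == 1 ? 0 : -EPERM;
 */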

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;
extern int sysctl_bpf_stats_enabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
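
/* Illustrative use (sketch, modeled on the per-cpu map copy paths): each
 * cpu's slot is copied with long-sized accesses, so concurrently updated
 * counters tear at worst at long granularity:
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */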

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
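
/* Illustrative sketch: ->map_alloc() implementations pass the result
 * straight to the allocator, e.g.:
 *
 *	int numa_node = bpf_map_attr_numa_node(attr);
 *
 *	array = bpf_map_area_alloc(array_size, numa_node);
 */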

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							  int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device  *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}
struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_XDP_SOCKETS)
struct xdp_sock;
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
#else
struct xdp_sock;
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
				     struct xdp_sock *xs)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct bpf_map *map)
{
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

#endif /* _LINUX_BPF_H */