/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
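
/* A map type implements the interface above by filling in a static ops
 * table; a minimal sketch for illustration only (the example_* callbacks
 * are hypothetical -- the real tables live in kernel/bpf/, e.g. arraymap.c):
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *	};
 */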

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool unpriv_array;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
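
/* Sketch of how the two helpers above pair up, assuming "elem" is the
 * in-map value and "val" a caller-supplied buffer (illustrative only):
 *
 *	check_and_init_map_lock(map, elem);	// zero the embedded lock once
 *	copy_map_value(map, elem, val);		// copy skips the lock word
 */
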
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};
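
/* For illustration, the proto for the map lookup helper is wired up along
 * these lines in kernel/bpf/helpers.c (shown here as a sketch):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */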

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
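
/* The run path updates these counters roughly as follows (sketch of the
 * stats-enabled BPF_PROG_RUN variant; "start" is a prior sched_clock()):
 *
 *	struct bpf_prog_stats *stats = this_cpu_ptr(prog->aux->stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->cnt++;
 *	stats->nsecs += sched_clock() - start;
 *	u64_stats_update_end(&stats->syncp);
 */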

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40
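/* (i.e. 40 progs * ~50 bytes = ~2000 bytes, which stays under
 * BPF_IMAGE_SIZE / 2 -- roughly 2K with 4K pages.)
 */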

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
};

static __always_inline unsigned int bpf_dispatcher_nopfunc(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(name) {			\
	.mutex = __MUTEX_INITIALIZER(name.mutex),	\
	.func = &name##func,				\
	.progs = {},					\
	.num_progs = 0,					\
	.image = NULL,					\
	.image_off = 0					\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int name##func(				\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(name##func);			\
	struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int name##func(					\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher name;
#define BPF_DISPATCHER_FUNC(name) name##func
#define BPF_DISPATCHER_PTR(name) (&name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
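
/* A minimal usage sketch, modeled on the XDP dispatcher (treat names as
 * illustrative of net/core/filter.c in this kernel era):
 *
 *	DEFINE_BPF_DISPATCHER(xdp)
 *
 *	void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp),
 *					   prev_prog, prog);
 *	}
 */
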
struct bpf_image {
	struct latch_tree_node tnode;
	unsigned char data[];
};
#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
bool is_bpf_image_address(unsigned long address);
void *bpf_image_alloc(void);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program which FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
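
/* For illustration, net/ipv4/bpf_tcp_ca.c instantiates this for
 * tcp_congestion_ops roughly as follows (callback names are that file's;
 * treat this as a sketch):
 *
 *	struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.init		= bpf_tcp_ca_init,
 *		.check_member	= bpf_tcp_ca_check_member,
 *		.init_member	= bpf_tcp_ca_init_member,
 *		.reg		= bpf_tcp_ca_reg,
 *		.unreg		= bpf_tcp_ca_unreg,
 *		.name		= "tcp_congestion_ops",
 *	};
 */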

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};
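
/* Plain array lookups index value[] with the verifier-supplied mask to
 * harden against speculative out-of-bounds access, in the spirit of
 * array_map_lookup_elem():
 *
 *	if (unlikely(index >= array->map.max_entries))
 *		return NULL;
 *	return array->value + array->elem_size * (index & array->index_mask);
 */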

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		migrate_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
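
/* A minimal caller sketch (field names as in the cgroup-bpf code, shown
 * for shape only):
 *
 *	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, BPF_PROG_RUN);
 *	return ret == 1 ? 0 : -EPERM;
 */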

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
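
/* Syscall-side map code brackets its critical sections like this (sketch
 * of the update path in kernel/bpf/syscall.c):
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */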

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

struct bpf_link;

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
};

void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
		   struct bpf_prog *prog);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return the map's numa node specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device  *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device  *__dev_map_hash_lookup_elem(struct bpf_map *map,
							     u32 key)
{
	return NULL;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
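
/* e.g. tail call poking flips a jump between two program images, in the
 * spirit of the prog array's map_poke_run() callback:
 *
 *	bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
 *			   old ? (u8 *)old->bpf_func + poke->adj_off : NULL,
 *			   new ? (u8 *)new->bpf_func + poke->adj_off : NULL);
 */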

#endif /* _LINUX_BPF_H */