/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers.*/
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has
	 * the same properties that the verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
			(struct bpf_spin_lock){};
	if (unlikely(map_value_has_timer(map)))
		*(struct bpf_timer *)(dst + map->timer_off) =
			(struct bpf_timer){};
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	}
	if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}

	if (unlikely(s_sz || t_sz)) {
		if (s_off < t_off || !s_sz) {
			swap(s_off, t_off);
			swap(s_sz, t_sz);
		}
		memcpy(dst, src, t_off);
		memcpy(dst + t_off + t_sz,
		       src + t_off + t_sz,
		       s_off - t_off - t_sz);
		memcpy(dst + s_off + s_sz,
		       src + s_off + s_sz,
		       map->value_size - s_off - s_sz);
	} else {
		memcpy(dst, src, map->value_size);
	}
}
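
/* Illustrative sketch of the chunked copy in copy_map_value() above
 * (offsets are made up for illustration): for a 64-byte value with a
 * bpf_timer at offset 8 and a bpf_spin_lock at offset 40, the swap
 * ensures (t_off, t_sz) names the earlier hole, and the three memcpy()
 * calls copy [0, 8), [8 + sizeof(struct bpf_timer), 40) and
 * [40 + sizeof(struct bpf_spin_lock), 64), leaving both in-value
 * objects untouched in dst.
 */
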
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	__BPF_REG_TYPE_MAX,
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
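
/* Note: bpf_dispatcher_nop_func() is the fallback dispatch path: when
 * CONFIG_BPF_JIT is disabled, BPF_DISPATCHER_FUNC() below resolves to it
 * and the program is invoked through the plain bpf_func indirect call.
 */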
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by the verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
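
/* Illustrative sketch (not part of the kernel API): a map created with
 * map_flags = BPF_F_RDONLY_PROG yields bpf_map_flags_to_cap() ==
 * BPF_MAP_CAN_READ, so programs may only look elements up; with neither
 * _PROG flag set, both BPF_MAP_CAN_READ and BPF_MAP_CAN_WRITE are granted.
 */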

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}
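
/* Typical pairing of bpf_set_run_ctx()/bpf_reset_run_ctx(), as used by the
 * BPF_PROG_RUN_ARRAY*() helpers below (sketch for illustration only):
 *
 *	struct bpf_cg_run_ctx run_ctx;
 *	struct bpf_run_ctx *old_run_ctx;
 *
 *	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 *	... run programs; run_ctx is reachable via current->bpf_ctx ...
 *	bpf_reset_run_ctx(old_run_ctx);
 */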

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE			(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN						(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;
	u32 func_ret;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		ret &= (func_ret & 1);
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}
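
/* Note: in BPF_PROG_RUN_ARRAY_CG_FLAGS() above, 'ret' accumulates the AND
 * of each program's low result bit while *ret_flags ORs together the
 * remaining bits (e.g. BPF_RET_SET_CN), so any one program can veto the
 * verdict or raise a flag.
 */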

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
		      const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	if (unlikely(!array))
		goto out;
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
out:
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT values or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		u32 _flags = 0;				\
		bool _cn;				\
		u32 _ret;				\
		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;		\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
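
/* Illustrative sketch (for illustration only): map code reachable from
 * tracing programs brackets its critical section as
 *
 *	bpf_disable_instrumentation();
 *	... take bucket lock, update shared map state ...
 *	bpf_enable_instrumentation();
 *
 * so BPF programs attached to probes that fire inside the section are
 * skipped instead of re-entering the map code and deadlocking.
 */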

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }
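
/* Illustrative sketch of DEFINE_BPF_ITER_FUNC() (hypothetical 'foo'
 * target, for illustration only):
 *
 *	DEFINE_BPF_ITER_FUNC(foo, struct bpf_iter_meta *meta, struct foo *f)
 *
 * emits a dummy bpf_iter_foo() whose purpose is to give BTF the exact
 * signature that bpf_iter programs attaching to "foo" must match.
 */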

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

1501
#define BPF_ITER_CTX_ARG_MAX 2
1502 1503
struct bpf_iter_reg {
	const char *target;
1504 1505
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
1506 1507
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
1508
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
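
/* Illustrative sketch of a target registration; the my_* names are
 * hypothetical, but the shape mirrors real targets such as the
 * map-element iterator in kernel/bpf/map_iter.c:
 *
 *	static const struct bpf_iter_reg my_iter_reg = {
 *		.target			= "my_target",
 *		.attach_target		= my_attach,
 *		.detach_target		= my_detach,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__my_target, obj),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &my_seq_info,
 *	};
 *
 * followed by a bpf_iter_reg_target(&my_iter_reg) call at init time.
 */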

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};
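
/* On the BPF side, an iterator program receives the structure above as
 * its context; a minimal consumer in the style of the selftests under
 * tools/testing/selftests/bpf/progs/ (dump_bpf_map_elem is a
 * hypothetical name):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_bpf_map_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		__u32 *key = ctx->key, *val = ctx->value;
 *
 *		if (key && val)
 *			BPF_SEQ_PRINTF(seq, "%u: %u\n", *key, *val);
 *		return 0;
 *	}
 */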

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of 8, and forced to use 'long' read/writes to try to
 * atomically copy long counters. Best-effort only. No barriers here,
 * since it _will_ race with concurrent updates from BPF programs.
 * Called from the bpf syscall and mostly used with size 8 or 16 bytes,
 * so ask the compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
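
/* Typical use when draining per-cpu values into a userspace-visible
 * buffer, as in bpf_percpu_hash_copy() (sketch; round_up() keeps the
 * size a multiple of sizeof(long)):
 *
 *	size = round_up(map->value_size, 8);
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */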

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb);

/* Return the map's NUMA node as specified by userspace, or NUMA_NO_NODE */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
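
/* Map allocators resolve the node once at allocation time, e.g. (sketch
 * of the common pattern in array_map_alloc() and friends):
 *
 *	int numa_node = bpf_map_attr_numa_node(attr);
 *
 * and pass it on to bpf_map_area_alloc() or kmalloc_node().
 */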

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
				struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
			      const struct btf *btf, u32 func_id,
			      struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
					   struct sk_buff *skb)
{
	return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
						  struct module *owner)
{
	return false;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
		enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
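
/* Used by trampoline and dispatcher code to live-patch call/jump sites;
 * sketch of a call-site update in the style of bpf_trampoline_update()
 * (ip, old_image and new_image stand in for the real arguments):
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_image, new_image);
 */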

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS		12

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
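
/* bpf_bprintf_prepare() pins a per-cpu buffer that must be released with
 * bpf_bprintf_cleanup(); a minimal sketch of the pairing, following the
 * printk-style helpers in kernel/trace/bpf_trace.c:
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args,
 *				  &bin_args, num_args);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(buf, sizeof(buf), fmt, bin_args);
 *	bpf_bprintf_cleanup();
 */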

#endif /* _LINUX_BPF_H */