/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>

#include <net/sch_generic.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
#define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

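/* Illustrative sketch (not part of the kernel API proper): the helpers
 * above expand to plain struct bpf_insn initializers and thus compose
 * directly inside instruction arrays, e.g.:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),	      // r2 += 8
 *		BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3), // r2 -= r3
 *	};
 */
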
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

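/* Usage note (illustrative): BPF_LD_IMM64() expands to *two* struct
 * bpf_insn initializers, so it occupies two slots in an instruction
 * array and jump offsets must account for that. A hypothetical map
 * reference (map_fd being a placeholder file descriptor) would read:
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),	// takes 2 insn slots
 */
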
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

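/* Example (illustrative): BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_3, 0)
 * atomically performs *(u32 *)(r2 + 0) += (u32)r3; the old value is
 * not fetched back into a register.
 */
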
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

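/* Example (illustrative): a common pattern is zeroing a stack slot
 * through the frame pointer before using it, e.g. as a map key:
 *
 *	BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0),	// *(u32 *)(fp - 4) = 0
 */
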
/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })

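/* Example (illustrative): jump offsets are counted in instructions
 * relative to the *next* insn, so an offset of 1 skips exactly one
 * instruction:
 *
 *	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),	// if r0 != 0 goto +1
 *	BPF_MOV64_IMM(BPF_REG_0, 1),		// executed when r0 == 0
 */
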
/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })

/* Function call */

#define BPF_CAST_CALL(x)					\
		((u64 (*)(u64, u64, u64, u64, u64))(x))

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

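/* Putting the initializers together (illustrative sketch, not used by
 * the kernel itself), a minimal valid program that returns 1 is:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),	// r0 = 1
 *		BPF_EXIT_INSN(),		// return r0
 *	};
 *
 * R0 must be written before BPF_EXIT_INSN(), since it carries the
 * program's return value.
 */
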
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})

#define bpf_size_to_bytes(bpf_size)				\
({								\
	int bytes = -EINVAL;					\
								\
	if (bpf_size == BPF_B)					\
		bytes = sizeof(u8);				\
	else if (bpf_size == BPF_H)				\
		bytes = sizeof(u16);				\
	else if (bpf_size == BPF_W)				\
		bytes = sizeof(u32);				\
	else if (bpf_size == BPF_DW)				\
		bytes = sizeof(u64);				\
								\
	bytes;							\
})

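/* The two helpers above are exact inverses for the four valid widths,
 * e.g. bytes_to_bpf_size(sizeof(u32)) == BPF_W and
 * bpf_size_to_bytes(BPF_W) == 4; any other input yields -EINVAL.
 */
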
#define BPF_SIZEOF(type)					\
	({							\
		const int __size = bytes_to_bpf_size(sizeof(type)); \
		BUILD_BUG_ON(__size < 0);			\
		__size;						\
	})

#define BPF_FIELD_SIZEOF(type, field)				\
	({							\
		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
		BUILD_BUG_ON(__size < 0);			\
		__size;						\
	})

#define BPF_LDST_BYTES(insn)					\
	({							\
		const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
		WARN_ON(__size < 0);				\
		__size;						\
	})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)						       \
	(__force t)							       \
	(__force							       \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)							       \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)					       \
	static __always_inline						       \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
	{								       \
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	}								       \
	static __always_inline						       \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)

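/* Sketch of the intended use (bpf_my_helper is a hypothetical name for
 * illustration only): BPF_CALL_x() lets a helper be written with its
 * natural C prototype while still exporting the five-u64-register
 * calling convention that the interpreter and JITs expect:
 *
 *	BPF_CALL_2(bpf_my_helper, struct sk_buff *, skb, u32, flags)
 *	{
 *		return flags ? skb->len : 0;
 *	}
 */
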
#define bpf_ctx_range(TYPE, MEMBER)						\
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)				\
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)				\
	({									\
		BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));		\
		*(PTR_SIZE) = (SIZE);						\
		offsetof(TYPE, MEMBER);						\
	})

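/* Typical use (illustrative): the range macros expand to GCC case
 * ranges inside an is_valid_access() style switch over ctx offsets:
 *
 *	switch (off) {
 *	case bpf_ctx_range(struct __sk_buff, len):
 *		...
 *	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 *		...
 *	}
 */
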
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	u32 pages;
	/* Some arches need word alignment for their instructions */
	u8 image[] __aligned(4);
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				undo_set_mem:1,	/* Passed set_memory_ro() checkpoint */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1; /* callchain buffer allocated? */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)

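/* Note (sketch): BPF_PROG_RUN() dispatches through prog->bpf_func and
 * therefore works for both the interpreter and a JITed image. Callers
 * are expected to hold rcu_read_lock() around the invocation (see
 * bpf_prog_run_xdp() below), e.g.:
 *
 *	rcu_read_lock();
 *	ret = BPF_PROG_RUN(prog, skb);
 *	rcu_read_unlock();
 */
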
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct bpf_redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	u32 kern_flags;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that the cb[] area can be written to when the BPF program
 * is invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end  = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}
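
/* Concretely (illustrative): a tc program writing __sk_buff->cb[0]
 * thus lands in bytes 0..3 of qdisc_skb_cb(skb)->data, which is why
 * the BUILD_BUG_ON()s above pin both layouts to BPF_SKB_CB_LEN.
 */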

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Caller needs to hold rcu_read_lock() (!), otherwise program
	 * can be released while still running, or map elements could be
	 * freed early while still having concurrent users. XDP fastpath
	 * already takes rcu_read_lock() when fetching the program, so
	 * it's not necessary here anymore.
	 */
	return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
	const u32 size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;

	return size;
}

static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
					   u32 size_default)
{
	size_default = bpf_ctx_off_adjust_machine(size_default);
	size_access  = bpf_ctx_off_adjust_machine(size_access);

#ifdef __LITTLE_ENDIAN
	return (off & (size_default - 1)) == 0;
#else
	return (off & (size_default - 1)) + size_access == size_default;
#endif
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
	       size <= size_default && (size & (size - 1)) == 0;
}
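
/* Worked example: with size_default == 4, a 2-byte narrow access is
 * accepted at off 0 on little endian ((0 & 3) == 0) and at off 2 on
 * big endian ((2 & 3) + 2 == 4), i.e. only where it overlaps the bytes
 * a full-width load would leave in the low end of the register. A
 * 3-byte access always fails the power-of-two check above.
 */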

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	fp->undo_set_mem = 1;
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	if (fp->undo_set_mem)
		set_memory_rw((unsigned long)fp, fp->pages);
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_memory_ro((unsigned long)hdr, hdr->pages);
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
	set_memory_rw((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr = real_start & PAGE_MASK;

	return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
	 __bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(void)
{
	/* Reconstruction of call-sites is dependent on kallsyms,
	 * thus make dump the same restriction.
	 */
	return kallsyms_show_value() == 1;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
 * same cpu context. Further for best results no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
void xdp_do_flush_map(void);

void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
		     struct bpf_prog *prog, struct sk_buff *skb,
		     u32 hash)
{
	return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!prog->jit_requested)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled, e.g. when hardening is active.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

struct bpf_sock_addr_kern {
	struct sock *sk;
	struct sockaddr *uaddr;
	/* Temporary "register" to make indirect stores to nested structures
	 * defined above. We need three registers to make such a store, but
	 * only two (src and dst) are available at convert_ctx_access time.
	 */
	u64 tmp_reg;
	void *t_ctx;	/* Attach type specific context. */
};

struct bpf_sock_ops_kern {
	struct	sock *sk;
	u32	op;
	union {
		u32 args[4];
		u32 reply;
		u32 replylong[4];
	};
	u32	is_fullsock;
	u64	temp;			/* temp and everything after is not
					 * initialized to 0 before calling
					 * the BPF program. New fields that
					 * should be initialized to 0 should
					 * be inserted before temp.
					 * temp is scratch storage used by
					 * sock_ops_convert_ctx_access
					 * as temporary storage of a register.
					 */
};

#endif /* __LINUX_FILTER_H__ */