/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>

#include <net/sch_generic.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

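/*
 * Usage sketch (illustrative, not part of the original header): with
 * the initializers above, "r2 += r3" is written as
 * BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3) and "r0 *= 3" as
 * BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 3).
 */
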
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

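/*
 * Example (illustrative): BPF_ENDIAN(BPF_TO_BE, BPF_REG_1, 16)
 * converts the lower 16 bits of r1 to big-endian byte order;
 * BPF_TO_LE converts to little-endian accordingly.
 */
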
/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

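/*
 * Note: unlike the other initializers, BPF_LD_IMM64() expands to a
 * *pair* of array entries, since a 64-bit immediate occupies two
 * instructions; the second one (reserved opcode 0) carries the upper
 * 32 bits of the immediate.
 */
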
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

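/*
 * Example (illustrative): BPF_LD_MAP_FD(BPF_REG_1, map_fd) emits a
 * pseudo load of the process-local file descriptor; the verifier
 * later rewrites the immediate into the actual map pointer.
 */
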
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

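/*
 * Example (illustrative): BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_2, 0)
 * atomically adds the lower 32 bits of r2 to the u32 at address r1.
 */
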
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - __bpf_call_base) })

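/*
 * Note: the call target is encoded relative to __bpf_call_base, so
 * the interpreter can reconstruct the helper address by adding the
 * base back to insn->imm before the indirect call.
 */
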
/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

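/*
 * Putting the initializers together (an illustrative sketch in the
 * style of lib/test_bpf.c): a minimal eBPF program returning 1 when
 * r1 == 42 and 0 otherwise could be written as
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 42, 1),
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 */
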
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})
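
/*
 * Example (illustrative): bytes_to_bpf_size(sizeof(u32)) evaluates to
 * BPF_W; any unsupported size yields -EINVAL, which callers need to
 * check before or-ing the result into an instruction code.
 */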

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

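/*
 * The JITed image is laid out directly behind this header; 'pages'
 * records the size of the whole allocation so that
 * bpf_jit_binary_free() can release it again.
 */
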
struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	u32			len;		/* Number of filter blocks */
	enum bpf_prog_type	type;		/* Type of BPF program */
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_end;
};

/* compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf and act_bpf programs
 */
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_end = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

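/*
 * Example (illustrative): for a 4-block classic program,
 * bpf_prog_size(4) is offsetof(struct bpf_prog, insns[4]), i.e. the
 * header plus four struct sock_filter entries; the max() guards the
 * degenerate proglen == 0 case.
 */
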
static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool bpf_jit_blinding_enabled(void)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!bpf_jit_enable)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

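/*
 * Note (illustrative): a negative 'k' refers to the special classic
 * BPF ranges (e.g. SKF_NET_OFF or SKF_LL_OFF based offsets), which
 * bpf_internal_load_pointer_neg_helper() resolves; non-negative
 * offsets take the regular skb_header_pointer() path.
 */
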
static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */