/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes a single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })
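
/* Note: BPF_LD_IMM64() expands to two struct bpf_insn entries (the second
 * carries the upper 32 bits of the immediate in its imm field), so it
 * occupies two slots in an instruction array initializer.
 */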

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
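
/* Illustrative only (not part of this header): a small eBPF program can be
 * built as a plain array of the initializers above. The hypothetical sketch
 * below saves the context, loads the first word of the packet into R0,
 * masks the low 16 bits and returns them:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1),
 *		BPF_LD_ABS(BPF_W, 0),
 *		BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xffff),
 *		BPF_EXIT_INSN(),
 *	};
 */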

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})
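
/* For example, bytes_to_bpf_size(sizeof(u32)) evaluates to BPF_W, while an
 * unsupported size yields -EINVAL.
 */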

/* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) \
	(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	bool			jited;		/* Is our filter JIT'ed? */
	u32			len;		/* Number of filter blocks */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
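
/* Illustrative use (assuming a populated struct sk_filter *fp and an skb):
 *
 *	unsigned int res = SK_RUN_FILTER(fp, skb);
 *
 * which dispatches to either the JIT image or the eBPF interpreter through
 * fp->prog->bpf_func.
 */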

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}
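
/* bpf_prog_size() gives the bytes needed for a struct bpf_prog holding
 * proglen instructions; the max() keeps the result at least as large as the
 * struct itself, e.g. bpf_prog_size(0) == sizeof(struct bpf_prog).
 */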

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);

void bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);

int bpf_convert_filter(struct sock_filter *prog, int len,
		       struct bpf_insn *new_prog, int *new_len);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_detach_filter(struct sock *sk);

int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);

#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
	       flen, proglen, pass, image);
	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}
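
/* For instance, a classic instruction BPF_LD | BPF_W | BPF_ABS with
 * k == SKF_AD_OFF + SKF_AD_PROTOCOL is reported by bpf_anc_helper() as
 * BPF_ANC | SKF_AD_PROTOCOL, while ordinary loads return their code
 * unchanged.
 */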

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
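
/* bpf_load_pointer() falls back to the negative-offset helper for the
 * special SKF_LL_OFF/SKF_NET_OFF ranges used by classic BPF; non-negative
 * offsets go through skb_header_pointer() with the supplied buffer.
 */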

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */