filter.c 45.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
4 5
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
L
Linus Torvalds 已提交
6
 *
7 8 9 10 11 12 13
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
L
Linus Torvalds 已提交
14 15 16 17 18 19 20
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
21
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
L
Linus Torvalds 已提交
22 23 24 25 26 27 28 29 30 31 32
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
33
#include <linux/gfp.h>
L
Linus Torvalds 已提交
34 35
#include <net/ip.h>
#include <net/protocol.h>
36
#include <net/netlink.h>
L
Linus Torvalds 已提交
37 38 39 40 41
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
42
#include <asm/unaligned.h>
L
Linus Torvalds 已提交
43
#include <linux/filter.h>
44
#include <linux/ratelimit.h>
45
#include <linux/seccomp.h>
E
Eric Dumazet 已提交
46
#include <linux/if_vlan.h>
L
Linus Torvalds 已提交
47

48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68
/* Registers */
#define R0	regs[BPF_REG_0]
#define R1	regs[BPF_REG_1]
#define R2	regs[BPF_REG_2]
#define R3	regs[BPF_REG_3]
#define R4	regs[BPF_REG_4]
#define R5	regs[BPF_REG_5]
#define R6	regs[BPF_REG_6]
#define R7	regs[BPF_REG_7]
#define R8	regs[BPF_REG_8]
#define R9	regs[BPF_REG_9]
#define R10	regs[BPF_REG_10]

/* Named registers */
#define A	regs[insn->a_reg]
#define X	regs[insn->x_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define K	insn->imm

69 70 71 72 73
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	/* Negative offsets are encoded relative to magic bases:
	 * k >= SKF_NET_OFF addresses the network header, otherwise
	 * k >= SKF_LL_OFF addresses the link-layer (MAC) header.
	 */
	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	/* Only hand the pointer back when all @size bytes lie within
	 * the linear part of the skb; otherwise the load must fail.
	 * (ptr stays NULL when k matched neither base above, which
	 * also fails this range check.)
	 */
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

E
Eric Dumazet 已提交
87
/* Fetch @size bytes of packet data at filter offset @k.
 * Negative offsets are resolved against the special SKF_*_OFF bases;
 * non-negative ones go through skb_header_pointer(), which copies
 * into @buffer when the requested area is not linear.
 */
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k < 0)
		return bpf_internal_load_pointer_neg_helper(skb, k, size);

	return skb_header_pointer(skb, k, size, buffer);
}

S
Stephen Hemminger 已提交
95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	/* Let LSM modules veto delivery before running the filter. */
	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	/* sk->sk_filter may be replaced or detached concurrently;
	 * RCU keeps the filter we dereference alive for the run.
	 */
	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		/* Filter returned 0: drop. Otherwise trim to pkt_len. */
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);

137 138 139 140 141 142 143 144 145
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 *
 * Call targets of BPF_JMP|BPF_CALL insns are encoded in insn->imm as
 * an offset relative to this symbol (see JMP_CALL_0 in the interpreter
 * and convert_bpf_extensions()).
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}

L
Linus Torvalds 已提交
146
/**
 *	__sk_run_filter - run a filter on a given context
 *	@ctx: buffer to run the filter on
 *	@insn: filter to apply
 *
 * Decode and apply filter instructions to the skb->data. Return length to
 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
 * array of filter instructions.
 */
unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	/* Computed-goto dispatch table, indexed by insn->code. Unknown
	 * opcodes fall through to default_label.
	 */
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
#define DL(A, B, C)	[BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
		DL(ALU, ADD, X),
		DL(ALU, ADD, K),
		DL(ALU, SUB, X),
		DL(ALU, SUB, K),
		DL(ALU, AND, X),
		DL(ALU, AND, K),
		DL(ALU, OR, X),
		DL(ALU, OR, K),
		DL(ALU, LSH, X),
		DL(ALU, LSH, K),
		DL(ALU, RSH, X),
		DL(ALU, RSH, K),
		DL(ALU, XOR, X),
		DL(ALU, XOR, K),
		DL(ALU, MUL, X),
		DL(ALU, MUL, K),
		DL(ALU, MOV, X),
		DL(ALU, MOV, K),
		DL(ALU, DIV, X),
		DL(ALU, DIV, K),
		DL(ALU, MOD, X),
		DL(ALU, MOD, K),
		DL(ALU, NEG, 0),
		DL(ALU, END, TO_BE),
		DL(ALU, END, TO_LE),
		DL(ALU64, ADD, X),
		DL(ALU64, ADD, K),
		DL(ALU64, SUB, X),
		DL(ALU64, SUB, K),
		DL(ALU64, AND, X),
		DL(ALU64, AND, K),
		DL(ALU64, OR, X),
		DL(ALU64, OR, K),
		DL(ALU64, LSH, X),
		DL(ALU64, LSH, K),
		DL(ALU64, RSH, X),
		DL(ALU64, RSH, K),
		DL(ALU64, XOR, X),
		DL(ALU64, XOR, K),
		DL(ALU64, MUL, X),
		DL(ALU64, MUL, K),
		DL(ALU64, MOV, X),
		DL(ALU64, MOV, K),
		DL(ALU64, ARSH, X),
		DL(ALU64, ARSH, K),
		DL(ALU64, DIV, X),
		DL(ALU64, DIV, K),
		DL(ALU64, MOD, X),
		DL(ALU64, MOD, K),
		DL(ALU64, NEG, 0),
		DL(JMP, CALL, 0),
		DL(JMP, JA, 0),
		DL(JMP, JEQ, X),
		DL(JMP, JEQ, K),
		DL(JMP, JNE, X),
		DL(JMP, JNE, K),
		DL(JMP, JGT, X),
		DL(JMP, JGT, K),
		DL(JMP, JGE, X),
		DL(JMP, JGE, K),
		DL(JMP, JSGT, X),
		DL(JMP, JSGT, K),
		DL(JMP, JSGE, X),
		DL(JMP, JSGE, K),
		DL(JMP, JSET, X),
		DL(JMP, JSET, K),
		DL(JMP, EXIT, 0),
		DL(STX, MEM, B),
		DL(STX, MEM, H),
		DL(STX, MEM, W),
		DL(STX, MEM, DW),
		DL(STX, XADD, W),
		DL(STX, XADD, DW),
		DL(ST, MEM, B),
		DL(ST, MEM, H),
		DL(ST, MEM, W),
		DL(ST, MEM, DW),
		DL(LDX, MEM, B),
		DL(LDX, MEM, H),
		DL(LDX, MEM, W),
		DL(LDX, MEM, DW),
		DL(LD, ABS, W),
		DL(LD, ABS, H),
		DL(LD, ABS, B),
		DL(LD, IND, W),
		DL(LD, IND, H),
		DL(LD, IND, B),
#undef DL
	};
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	/* Frame pointer points one past the end of the stack area;
	 * ARG1 (R1) carries the context into the program.
	 */
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Register for user BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		A = A OP X;		\
		CONT;			\
	ALU_##OPCODE##_X:		\
		A = (u32) A OP (u32) X;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		A = A OP K;		\
		CONT;			\
	ALU_##OPCODE##_K:		\
		A = (u32) A OP (u32) K;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG_0:
		A = (u32) -A;
		CONT;
	ALU64_NEG_0:
		A = -A;
		CONT;
	ALU_MOV_X:
		A = (u32) X;
		CONT;
	ALU_MOV_K:
		A = (u32) K;
		CONT;
	ALU64_MOV_X:
		A = X;
		CONT;
	ALU64_MOV_K:
		A = K;
		CONT;
	ALU64_ARSH_X:
		/* Arithmetic (sign-preserving) right shift. */
		(*(s64 *) &A) >>= X;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &A) >>= K;
		CONT;
	ALU64_MOD_X:
		/* Runtime divide-by-zero aborts the program (returns 0). */
		if (unlikely(X == 0))
			return 0;
		tmp = A;
		A = do_div(tmp, X);
		CONT;
	ALU_MOD_X:
		if (unlikely(X == 0))
			return 0;
		tmp = (u32) A;
		A = do_div(tmp, (u32) X);
		CONT;
	ALU64_MOD_K:
		tmp = A;
		A = do_div(tmp, K);
		CONT;
	ALU_MOD_K:
		tmp = (u32) A;
		A = do_div(tmp, (u32) K);
		CONT;
	ALU64_DIV_X:
		if (unlikely(X == 0))
			return 0;
		do_div(A, X);
		CONT;
	ALU_DIV_X:
		if (unlikely(X == 0))
			return 0;
		tmp = (u32) A;
		do_div(tmp, (u32) X);
		A = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(A, K);
		CONT;
	ALU_DIV_K:
		tmp = (u32) A;
		do_div(tmp, (u32) K);
		A = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		/* K selects the operand width to byte-swap (16/32/64). */
		switch (K) {
		case 16:
			A = (__force u16) cpu_to_be16(A);
			break;
		case 32:
			A = (__force u32) cpu_to_be32(A);
			break;
		case 64:
			A = (__force u64) cpu_to_be64(A);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (K) {
		case 16:
			A = (__force u16) cpu_to_le16(A);
			break;
		case 32:
			A = (__force u32) cpu_to_le32(A);
			break;
		case 64:
			A = (__force u64) cpu_to_le64(A);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL_0:
		/* Function call scratches R1-R5 registers, preserves R6-R9,
		 * and stores return value into R0.
		 */
		R0 = (__bpf_call_base + insn->imm)(R1, R2, R3, R4, R5);
		CONT;

	/* JMP */
	JMP_JA_0:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (A == X) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (A == K) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (A != X) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (A != K) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (A > X) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (A > K) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (A >= X) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (A >= K) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		/* JSGT/JSGE are the signed variants of JGT/JGE. */
		if (((s64) A) > ((s64) X)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) A) > ((s64) K)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) A) >= ((s64) X)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) A) >= ((s64) K)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (A & X) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (A & K) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT_0:
		return R0;

	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)					\
	STX_MEM_##SIZEOP:					\
		*(SIZE *)(unsigned long) (A + insn->off) = X;	\
		CONT;						\
	ST_MEM_##SIZEOP:					\
		*(SIZE *)(unsigned long) (A + insn->off) = K;	\
		CONT;						\
	LDX_MEM_##SIZEOP:					\
		A = *(SIZE *)(unsigned long) (X + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
		atomic_add((u32) X, (atomic_t *)(unsigned long)
			   (A + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
			     (A + insn->off));
		CONT;
	LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
		off = K;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
		 * appearing in the programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == R6, sk_convert_filter()
		 * saves it in R6, internal BPF verifier will check that
		 * R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls, so
		 * they scratch R1-R5 registers, preserve R6-R9, and store
		 * return value into R0.
		 *
		 * Implicit input:
		 *   ctx
		 *
		 * Explicit input:
		 *   X == any register
		 *   K == 32-bit immediate
		 *
		 * Output:
		 *   R0 - 8/16/32-bit skb data converted to cpu endianness
		 */
		ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			R0 = get_unaligned_be32(ptr);
			CONT;
		}
		return 0;
	LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
		off = K;
load_half:
		ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			R0 = get_unaligned_be16(ptr);
			CONT;
		}
		return 0;
	LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */
		off = K;
load_byte:
		ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			R0 = *(u8 *)ptr;
			CONT;
		}
		return 0;
	LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
		off = K + X;
		goto load_word;
	LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
		off = K + X;
		goto load_half;
	LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */
		off = K + X;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
			      const struct sock_filter_int *insni)
    __attribute__ ((alias ("__sk_run_filter")));

u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
			  const struct sock_filter_int *insni)
    __attribute__ ((alias ("__sk_run_filter")));
EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);

/* Helper to find the offset of pkt_type in sk_buff structure. We want
 * to make sure its still a 3bit field starting at a byte boundary;
 * taken from arch/x86/net/bpf_jit_comp.c.
 */
#define PKT_TYPE_MAX	7
static unsigned int pkt_type_offset(void)
{
	/* Probe: set the pkt_type bitfield to all-ones, then scan the
	 * struct byte-by-byte for the byte equal to PKT_TYPE_MAX, i.e.
	 * exactly those 3 bits and nothing else in that byte.
	 */
	struct sk_buff skb_probe = { .pkt_type = ~0, };
	u8 *ct = (u8 *) &skb_probe;
	unsigned int off;

	for (off = 0; off < sizeof(struct sk_buff); off++) {
		if (ct[off] == PKT_TYPE_MAX)
			return off;
	}

	/* NOTE(review): returns (unsigned)-1 on failure; callers in
	 * convert_bpf_extensions() store it into a signed insn->off and
	 * test for < 0 — verify that remains true for all callers.
	 */
	pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
	return -1;
}

599
/* BPF call helper backing SKF_AD_PAY_OFFSET: payload start offset of
 * the skb passed as @ctx. Arguments follow the internal BPF calling
 * convention (ctx in r1, A in r2, X in r3); a, x, r4, r5 are unused.
 */
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return __skb_get_poff((struct sk_buff *)(long) ctx);
}

606
/* BPF call helper backing SKF_AD_NLATTR: find a netlink attribute of
 * type @x starting at packet offset @a. Returns the attribute's offset
 * into skb->data, or 0 if not found / skb unsuitable.
 */
static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(long) ctx;
	struct nlattr *nla;

	/* Attribute parsing requires linear data. */
	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	/* @a comes from the (untrusted) filter; reject offsets that
	 * would not leave room for at least one attribute header.
	 */
	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

627
/* BPF call helper backing SKF_AD_NLATTR_NEST: like __skb_get_nlattr(),
 * but searches for attribute type @x nested inside the attribute found
 * at packet offset @a. Returns the offset into skb->data, or 0.
 */
static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	/* The outer attribute's own length must also fit within the
	 * packet before we walk its nested contents.
	 */
	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

652
/* BPF call helper backing SKF_AD_CPU: current CPU number. All
 * arguments are unused; signature matches the BPF calling convention.
 */
static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

C
Chema Gonzalez 已提交
657
/* BPF call helper backing SKF_AD_RANDOM.
 *
 * note that this only generates 32-bit random numbers (zero-extended
 * into the u64 return value).
 */
static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return (u64)prandom_u32();
}

663 664 665 666 667 668 669 670 671 672
/* Translate a classic BPF "ancillary load" (LD|ABS with a magic
 * SKF_AD_OFF-based offset) into one or more internal BPF instructions
 * written at *insnp. On success, advances *insnp to the LAST emitted
 * instruction (the caller's insn++ steps past it) and returns true.
 * Returns false when fp->k is not a known extension, so the caller
 * falls back to a plain load.
 */
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct sock_filter_int **insnp)
{
	struct sock_filter_int *insn = *insnp;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (ctx + offsetof(protocol)) */
		insn->code = BPF_LDX | BPF_MEM | BPF_H;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_CTX;
		insn->off = offsetof(struct sk_buff, protocol);
		insn++;

		/* A = ntohs(A) [emitting a nop or swap16] */
		insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
		insn->a_reg = BPF_REG_A;
		insn->imm = 16;
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		insn->code = BPF_LDX | BPF_MEM | BPF_B;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_CTX;
		insn->off = pkt_type_offset();
		if (insn->off < 0)
			return false;
		insn++;

		/* Mask off everything but the 3-bit pkt_type field. */
		insn->code = BPF_ALU | BPF_AND | BPF_K;
		insn->a_reg = BPF_REG_A;
		insn->imm = PKT_TYPE_MAX;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		/* Load the skb->dev pointer (size depends on arch),
		 * bail out of the program if it is NULL, then load the
		 * requested net_device field through it.
		 */
		if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
			insn->code = BPF_LDX | BPF_MEM | BPF_DW;
		else
			insn->code = BPF_LDX | BPF_MEM | BPF_W;
		insn->a_reg = BPF_REG_TMP;
		insn->x_reg = BPF_REG_CTX;
		insn->off = offsetof(struct sk_buff, dev);
		insn++;

		/* if (tmp != 0) goto +1; else exit */
		insn->code = BPF_JMP | BPF_JNE | BPF_K;
		insn->a_reg = BPF_REG_TMP;
		insn->imm = 0;
		insn->off = 1;
		insn++;

		insn->code = BPF_JMP | BPF_EXIT;
		insn++;

		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_TMP;

		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
			insn->code = BPF_LDX | BPF_MEM | BPF_W;
			insn->off = offsetof(struct net_device, ifindex);
		} else {
			insn->code = BPF_LDX | BPF_MEM | BPF_H;
			insn->off = offsetof(struct net_device, type);
		}
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		insn->code = BPF_LDX | BPF_MEM | BPF_W;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_CTX;
		insn->off = offsetof(struct sk_buff, mark);
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		insn->code = BPF_LDX | BPF_MEM | BPF_W;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_CTX;
		insn->off = offsetof(struct sk_buff, hash);
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		insn->code = BPF_LDX | BPF_MEM | BPF_H;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_CTX;
		insn->off = offsetof(struct sk_buff, queue_mapping);
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

		/* Load raw vlan_tci, then either strip the present bit
		 * (VLAN_TAG) or extract it as 0/1 (VLAN_TAG_PRESENT).
		 */
		insn->code = BPF_LDX | BPF_MEM | BPF_H;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_CTX;
		insn->off = offsetof(struct sk_buff, vlan_tci);
		insn++;

		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
			insn->code = BPF_ALU | BPF_AND | BPF_K;
			insn->a_reg = BPF_REG_A;
			insn->imm = ~VLAN_TAG_PRESENT;
		} else {
			insn->code = BPF_ALU | BPF_RSH | BPF_K;
			insn->a_reg = BPF_REG_A;
			insn->imm = 12;
			insn++;

			insn->code = BPF_ALU | BPF_AND | BPF_K;
			insn->a_reg = BPF_REG_A;
			insn->imm = 1;
		}
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* These extensions become real function calls. */
		/* arg1 = ctx */
		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
		insn->a_reg = BPF_REG_ARG1;
		insn->x_reg = BPF_REG_CTX;
		insn++;

		/* arg2 = A */
		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
		insn->a_reg = BPF_REG_ARG2;
		insn->x_reg = BPF_REG_A;
		insn++;

		/* arg3 = X */
		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
		insn->a_reg = BPF_REG_ARG3;
		insn->x_reg = BPF_REG_X;
		insn++;

		/* Emit call(ctx, arg2=A, arg3=X) */
		insn->code = BPF_JMP | BPF_CALL;
		/* Call targets are encoded relative to __bpf_call_base. */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			insn->imm = __skb_get_pay_offset - __bpf_call_base;
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			insn->imm = __skb_get_nlattr - __bpf_call_base;
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			insn->imm = __get_raw_cpu_id - __bpf_call_base;
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			insn->imm = __get_random_u32 - __bpf_call_base;
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		insn->code = BPF_ALU | BPF_XOR | BPF_X;
		insn->a_reg = BPF_REG_A;
		insn->x_reg = BPF_REG_X;
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

/**
 *	sk_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   sk_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
 *   sk_convert_filter(old_prog, old_len, new_prog, &new_len);
 *
 * User BPF's register A is mapped to our BPF register 6, user BPF
 * register X is mapped to BPF register 7; frame pointer is always
 * register 10; Context 'void *ctx' is stored in register 1, that is,
 * for socket filters: ctx == 'struct sk_buff *', for seccomp:
 * ctx == 'struct seccomp_data *'.
 *
 * Returns 0 on success, -EINVAL on malformed input or failed
 * convergence, -ENOMEM on allocation failure.
 */
int sk_convert_filter(struct sock_filter *prog, int len,
		      struct sock_filter_int *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct sock_filter_int *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len >= BPF_MAXINSNS)
		return -EINVAL;

	/* addrs[i] records where old insn i landed in the new program,
	 * so jump targets can be recomputed; only needed when emitting.
	 */
	if (new_prog) {
		addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	/* Prologue: save ctx (arg1) into R6 for LD_ABS/LD_IND use. */
	if (new_insn) {
		new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
		new_insn->a_reg = BPF_REG_CTX;
		new_insn->x_reg = BPF_REG_ARG1;
	}
	new_insn++;

	for (i = 0; i < len; fp++, i++) {
		/* One classic insn expands into at most 6 internal ones
		 * (see the BPF_LDX|BPF_MSH|BPF_B case below).
		 */
		struct sock_filter_int tmp_insns[6] = { };
		struct sock_filter_int *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			insn->code = fp->code;
			insn->a_reg = BPF_REG_A;
			insn->x_reg = BPF_REG_X;
			insn->imm = fp->k;
			break;

		/* Jump opcodes map as-is, but offsets need adjustment. */
		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
#define EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

			EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				insn->code = BPF_ALU | BPF_MOV | BPF_K;
				insn->a_reg = BPF_REG_TMP;
				insn->imm = fp->k;
				insn++;

				insn->a_reg = BPF_REG_A;
				insn->x_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->a_reg = BPF_REG_A;
				insn->x_reg = BPF_REG_X;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
			insn->a_reg = BPF_REG_TMP;
			insn->x_reg = BPF_REG_A;
			insn++;

			/* A = R0 = *(u8 *) (skb->data + K) */
			insn->code = BPF_LD | BPF_ABS | BPF_B;
			insn->a_reg = BPF_REG_A;
			insn->imm = fp->k;
			insn++;

			/* A &= 0xf */
			insn->code = BPF_ALU | BPF_AND | BPF_K;
			insn->a_reg = BPF_REG_A;
			insn->imm = 0xf;
			insn++;

			/* A <<= 2  (i.e. A *= 4) */
			insn->code = BPF_ALU | BPF_LSH | BPF_K;
			insn->a_reg = BPF_REG_A;
			insn->imm = 2;
			insn++;

			/* X = A */
			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
			insn->a_reg = BPF_REG_X;
			insn->x_reg = BPF_REG_A;
			insn++;

			/* A = tmp (restore original A) */
			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
			insn->a_reg = BPF_REG_A;
			insn->x_reg = BPF_REG_TMP;
			break;

		/* RET_K, RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			/* R0 = K (or R0 = A), then exit. */
			insn->code = BPF_ALU | BPF_MOV |
				     (BPF_RVAL(fp->code) == BPF_K ?
				      BPF_K : BPF_X);
			insn->a_reg = 0;
			insn->x_reg = BPF_REG_A;
			insn->imm = fp->k;
			insn++;

			insn->code = BPF_JMP | BPF_EXIT;
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			/* Classic mem[] cells live at the top of the
			 * internal BPF stack frame, below FP.
			 */
			insn->code = BPF_STX | BPF_MEM | BPF_W;
			insn->a_reg = BPF_REG_FP;
			insn->x_reg = fp->code == BPF_ST ?
				      BPF_REG_A : BPF_REG_X;
			insn->off = -(BPF_MEMWORDS - fp->k) * 4;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			insn->code = BPF_LDX | BPF_MEM | BPF_W;
			insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
				      BPF_REG_A : BPF_REG_X;
			insn->x_reg = BPF_REG_FP;
			insn->off = -(BPF_MEMWORDS - fp->k) * 4;
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			insn->code = BPF_ALU | BPF_MOV | BPF_K;
			insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
				      BPF_REG_A : BPF_REG_X;
			insn->imm = fp->k;
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
			insn->a_reg = BPF_REG_X;
			insn->x_reg = BPF_REG_A;
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
			insn->a_reg = BPF_REG_A;
			insn->x_reg = BPF_REG_X;
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			insn->code = BPF_LDX | BPF_MEM | BPF_W;
			insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
				      BPF_REG_A : BPF_REG_X;
			insn->x_reg = BPF_REG_CTX;
			insn->off = offsetof(struct sk_buff, len);
			break;

		/* access seccomp_data fields */
		case BPF_LDX | BPF_ABS | BPF_W:
			insn->code = BPF_LDX | BPF_MEM | BPF_W;
			insn->a_reg = BPF_REG_A;
			insn->x_reg = BPF_REG_CTX;
			insn->off = fp->k;
			break;

		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));

		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	/* Jump offsets depend on instruction placement, which depends on
	 * jump expansion; iterate until the emitted length is stable
	 * (must converge within two emission passes).
	 */
	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;

		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

1171 1172
/* Security:
 *
1173
 * A BPF program is able to use 16 cells of memory to store intermediate
1174 1175
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 *
1176 1177 1178
 * As we dont want to clear mem[] array for each packet going through
 * sk_run_filter(), we check that filter loaded by user never try to read
 * a cell if not previously written, and we check all branches to be sure
L
Lucas De Marchi 已提交
1179
 * a malicious user doesn't try to abuse us.
1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231
 */
/* Forward pass over the classic BPF program: track which scratch cells
 * are guaranteed to have been written before each instruction, and
 * reject any load from a possibly-uninitialized cell.
 *
 * Returns 0 if all loads are safe, -EINVAL if some path may read an
 * unwritten cell, -ENOMEM if the tracking array cannot be allocated.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	/* One u16 bitmask per instruction only works while there are
	 * at most 16 scratch cells.
	 */
	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	/* masks[pc]: set of cells known-written on entry to pc. Start
	 * from "all written" and narrow it (&=) at every jump target;
	 * fall-through narrowing happens via memvalid below.
	 */
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		/* Combine fall-through state with state from jumps here. */
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			/* Store marks cell k as written. */
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			/* Load from cell k is only legal if written on
			 * every path reaching this instruction.
			 */
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

L
Linus Torvalds 已提交
1232 1233 1234 1235 1236 1237 1238
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Note: on success the instructions in @filter are rewritten in place
 * from the user-visible BPF_* opcodes to the internal BPF_S_* codes.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
		[BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
		[BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	bool anc_found;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		/* Map user opcode to internal code; 0 means invalid. */
		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
		case BPF_S_ALU_MOD_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen-pc-1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* An absolute load with k >= SKF_AD_OFF is an
			 * "ancillary" load of socket metadata; remap it
			 * to the matching BPF_S_ANC_* internal code.
			 */
			anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				anc_found = true;		\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			ANCILLARY(ALU_XOR_X);
			ANCILLARY(VLAN_TAG);
			ANCILLARY(VLAN_TAG_PRESENT);
			ANCILLARY(PAY_OFFSET);
			ANCILLARY(RANDOM);
			}

			/* ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
		/* Rewrite the instruction to its internal opcode. */
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
L
Linus Torvalds 已提交
1399

1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430
static int sk_store_orig_filter(struct sk_filter *fp,
				const struct sock_fprog *fprog)
{
	unsigned int fsize = sk_filter_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;
	fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void sk_release_orig_filter(struct sk_filter *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

1431
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
1435
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	/* Drop the saved copy of the original user program first ... */
	sk_release_orig_filter(fp);
	/* ... then free the JIT image and the filter itself. */
	bpf_jit_free(fp);
}
1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 *	The actual freeing is deferred to an RCU callback
 *	(sk_filter_release_rcu) once the last reference is dropped,
 *	so concurrent RCU readers stay safe.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

/* Uncharge the filter's size from the socket's option-memory
 * accounting and drop one reference. The size must be read before
 * the release, which may free @fp.
 */
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int fsize = sk_filter_size(fp->len);

	atomic_sub(fsize, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* Take a reference on the filter and charge its size against the
 * socket's option-memory accounting.
 */
void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int fsize = sk_filter_size(fp->len);

	atomic_inc(&fp->refcnt);
	atomic_add(fsize, &sk->sk_omem_alloc);
}
1466

1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568
/* Resize a filter to @len bytes during the migration to internal BPF.
 *
 * Socket-less filters can simply be krealloc()'ed. Filters charged to
 * a socket must go through sock_kmalloc()/sk_filter_uncharge() so the
 * sk_omem_alloc accounting stays correct.
 *
 * On success the old @fp has been consumed. On failure (NULL return
 * with @sk != NULL) the old @fp is still valid and still charged.
 */
static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
					      struct sock *sk,
					      unsigned int len)
{
	struct sk_filter *fp_new;

	if (sk == NULL)
		return krealloc(fp, len, GFP_KERNEL);

	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
	if (fp_new) {
		/* Copy only the sk_filter head; insns are rewritten by
		 * the caller's 2nd conversion pass.
		 */
		memcpy(fp_new, fp, sizeof(struct sk_filter));
		/* As we're keeping orig_prog in fp_new along,
		 * we need to make sure we're not evicting it
		 * from the old fp.
		 */
		fp->orig_prog = NULL;
		sk_filter_uncharge(sk, fp);
	}

	return fp_new;
}

/* Migrate a classic BPF filter to the internal BPF instruction set so
 * that it can run on the new interpreter (sk_run_filter_int_skb).
 *
 * Consumes @fp in all cases: on success the (possibly reallocated)
 * filter is returned, on failure an ERR_PTR is returned and the
 * filter has been released/uncharged.
 */
static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
					     struct sock *sk)
{
	struct sock_filter *old_prog;
	struct sk_filter *old_fp;
	int i, err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct sock_filter_int));

	/* For now, we need to unfiddle BPF_S_* identifiers in place.
	 * This can sooner or later on be subject to removal, e.g. when
	 * JITs have been converted.
	 */
	for (i = 0; i < fp->len; i++)
		sk_decode_filter(&fp->insns[i], &fp->insns[i]);

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->bpf_func = sk_run_filter_int_skb;
	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
	err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd sk_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by __sk_migrate_realloc().
		 */
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	/* Rollback filter setup. */
	if (sk != NULL)
		sk_filter_uncharge(sk, fp);
	else
		kfree(fp);
	return ERR_PTR(err);
}

static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
					     struct sock *sk)
1569 1570 1571
{
	int err;

1572
	fp->bpf_func = NULL;
1573
	fp->jited = 0;
1574 1575 1576

	err = sk_chk_filter(fp->insns, fp->len);
	if (err)
1577
		return ERR_PTR(err);
1578

1579 1580 1581
	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
1582
	bpf_jit_compile(fp);
1583 1584 1585 1586 1587 1588 1589 1590

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = __sk_migrate_filter(fp, sk);

	return fp;
1591 1592 1593 1594 1595
}

/**
 *	sk_unattached_filter_create - create an unattached filter
 *	@fprog: the filter program
R
Randy Dunlap 已提交
1596
 *	@pfp: the unattached filter that is created
1597
 *
R
Randy Dunlap 已提交
1598
 * Create a filter independent of any socket. We first run some
1599 1600 1601 1602 1603 1604 1605
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
				struct sock_fprog *fprog)
{
1606
	unsigned int fsize = sk_filter_proglen(fprog);
1607 1608 1609 1610 1611 1612
	struct sk_filter *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

1613
	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
1614 1615
	if (!fp)
		return -ENOMEM;
1616

1617 1618 1619 1620
	memcpy(fp->insns, fprog->filter, fsize);

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
1621 1622 1623 1624 1625
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;
1626

1627 1628 1629 1630 1631 1632
	/* __sk_prepare_filter() already takes care of uncharging
	 * memory in case something goes wrong.
	 */
	fp = __sk_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);
1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);

/* Destroy a filter created by sk_unattached_filter_create(): drops the
 * single reference taken at creation; freeing is deferred via RCU.
 */
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
	sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);

L
Linus Torvalds 已提交
1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
1657
	struct sk_filter *fp, *old_fp;
1658
	unsigned int fsize = sk_filter_proglen(fprog);
1659
	unsigned int sk_fsize = sk_filter_size(fprog->len);
L
Linus Torvalds 已提交
1660 1661
	int err;

1662 1663 1664
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

L
Linus Torvalds 已提交
1665
	/* Make sure new filter is there and in the right amounts. */
1666 1667
	if (fprog->filter == NULL)
		return -EINVAL;
L
Linus Torvalds 已提交
1668

1669
	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
L
Linus Torvalds 已提交
1670 1671
	if (!fp)
		return -ENOMEM;
1672

L
Linus Torvalds 已提交
1673
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1674
		sock_kfree_s(sk, fp, sk_fsize);
L
Linus Torvalds 已提交
1675 1676 1677 1678 1679 1680
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

1681 1682 1683 1684 1685 1686
	err = sk_store_orig_filter(fp, fprog);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return -ENOMEM;
	}

1687 1688 1689 1690 1691 1692
	/* __sk_prepare_filter() already takes care of uncharging
	 * memory in case something goes wrong.
	 */
	fp = __sk_prepare_filter(fp, sk);
	if (IS_ERR(fp))
		return PTR_ERR(fp);
L
Linus Torvalds 已提交
1693

1694 1695
	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
1696 1697
	rcu_assign_pointer(sk->sk_filter, fp);

1698
	if (old_fp)
E
Eric Dumazet 已提交
1699
		sk_filter_uncharge(sk, old_fp);
1700

1701
	return 0;
L
Linus Torvalds 已提交
1702
}
1703
EXPORT_SYMBOL_GPL(sk_attach_filter);
L
Linus Torvalds 已提交
1704

1705 1706 1707 1708 1709
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

1710 1711 1712
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

1713 1714
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
1715
	if (filter) {
1716
		RCU_INIT_POINTER(sk->sk_filter, NULL);
E
Eric Dumazet 已提交
1717
		sk_filter_uncharge(sk, filter);
1718 1719
		ret = 0;
	}
1720

1721 1722
	return ret;
}
1723
EXPORT_SYMBOL_GPL(sk_detach_filter);
1724

1725
/* Translate the internal BPF_S_* opcode of @filt back to the original
 * user-visible BPF_* encoding in @to (inverse of sk_chk_filter()'s
 * rewrite). Note that all ancillary loads (BPF_S_ANC_*) decode to
 * BPF_LD|BPF_B|BPF_ABS regardless of the original width bits; the
 * SKF_AD_* offset is still carried in k.
 */
void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
	static const u16 decodes[] = {
		[BPF_S_ALU_ADD_K]	= BPF_ALU|BPF_ADD|BPF_K,
		[BPF_S_ALU_ADD_X]	= BPF_ALU|BPF_ADD|BPF_X,
		[BPF_S_ALU_SUB_K]	= BPF_ALU|BPF_SUB|BPF_K,
		[BPF_S_ALU_SUB_X]	= BPF_ALU|BPF_SUB|BPF_X,
		[BPF_S_ALU_MUL_K]	= BPF_ALU|BPF_MUL|BPF_K,
		[BPF_S_ALU_MUL_X]	= BPF_ALU|BPF_MUL|BPF_X,
		[BPF_S_ALU_DIV_X]	= BPF_ALU|BPF_DIV|BPF_X,
		[BPF_S_ALU_MOD_K]	= BPF_ALU|BPF_MOD|BPF_K,
		[BPF_S_ALU_MOD_X]	= BPF_ALU|BPF_MOD|BPF_X,
		[BPF_S_ALU_AND_K]	= BPF_ALU|BPF_AND|BPF_K,
		[BPF_S_ALU_AND_X]	= BPF_ALU|BPF_AND|BPF_X,
		[BPF_S_ALU_OR_K]	= BPF_ALU|BPF_OR|BPF_K,
		[BPF_S_ALU_OR_X]	= BPF_ALU|BPF_OR|BPF_X,
		[BPF_S_ALU_XOR_K]	= BPF_ALU|BPF_XOR|BPF_K,
		[BPF_S_ALU_XOR_X]	= BPF_ALU|BPF_XOR|BPF_X,
		[BPF_S_ALU_LSH_K]	= BPF_ALU|BPF_LSH|BPF_K,
		[BPF_S_ALU_LSH_X]	= BPF_ALU|BPF_LSH|BPF_X,
		[BPF_S_ALU_RSH_K]	= BPF_ALU|BPF_RSH|BPF_K,
		[BPF_S_ALU_RSH_X]	= BPF_ALU|BPF_RSH|BPF_X,
		[BPF_S_ALU_NEG]		= BPF_ALU|BPF_NEG,
		[BPF_S_LD_W_ABS]	= BPF_LD|BPF_W|BPF_ABS,
		[BPF_S_LD_H_ABS]	= BPF_LD|BPF_H|BPF_ABS,
		[BPF_S_LD_B_ABS]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PROTOCOL]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PKTTYPE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_IFINDEX]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_NLATTR]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_NLATTR_NEST]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_MARK]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_QUEUE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_HATYPE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_RXHASH]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_CPU]		= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_ALU_XOR_X]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_VLAN_TAG]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PAY_OFFSET]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_RANDOM]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_LD_W_LEN]	= BPF_LD|BPF_W|BPF_LEN,
		[BPF_S_LD_W_IND]	= BPF_LD|BPF_W|BPF_IND,
		[BPF_S_LD_H_IND]	= BPF_LD|BPF_H|BPF_IND,
		[BPF_S_LD_B_IND]	= BPF_LD|BPF_B|BPF_IND,
		[BPF_S_LD_IMM]		= BPF_LD|BPF_IMM,
		[BPF_S_LDX_W_LEN]	= BPF_LDX|BPF_W|BPF_LEN,
		[BPF_S_LDX_B_MSH]	= BPF_LDX|BPF_B|BPF_MSH,
		[BPF_S_LDX_IMM]		= BPF_LDX|BPF_IMM,
		[BPF_S_MISC_TAX]	= BPF_MISC|BPF_TAX,
		[BPF_S_MISC_TXA]	= BPF_MISC|BPF_TXA,
		[BPF_S_RET_K]		= BPF_RET|BPF_K,
		[BPF_S_RET_A]		= BPF_RET|BPF_A,
		[BPF_S_ALU_DIV_K]	= BPF_ALU|BPF_DIV|BPF_K,
		[BPF_S_LD_MEM]		= BPF_LD|BPF_MEM,
		[BPF_S_LDX_MEM]		= BPF_LDX|BPF_MEM,
		[BPF_S_ST]		= BPF_ST,
		[BPF_S_STX]		= BPF_STX,
		[BPF_S_JMP_JA]		= BPF_JMP|BPF_JA,
		[BPF_S_JMP_JEQ_K]	= BPF_JMP|BPF_JEQ|BPF_K,
		[BPF_S_JMP_JEQ_X]	= BPF_JMP|BPF_JEQ|BPF_X,
		[BPF_S_JMP_JGE_K]	= BPF_JMP|BPF_JGE|BPF_K,
		[BPF_S_JMP_JGE_X]	= BPF_JMP|BPF_JGE|BPF_X,
		[BPF_S_JMP_JGT_K]	= BPF_JMP|BPF_JGT|BPF_K,
		[BPF_S_JMP_JGT_X]	= BPF_JMP|BPF_JGT|BPF_X,
		[BPF_S_JMP_JSET_K]	= BPF_JMP|BPF_JSET|BPF_K,
		[BPF_S_JMP_JSET_X]	= BPF_JMP|BPF_JSET|BPF_X,
	};
	u16 code;

	code = filt->code;

	/* Operands are copied unchanged; only the opcode is remapped. */
	to->code = decodes[code];
	to->jt = filt->jt;
	to->jf = filt->jf;
	to->k = filt->k;
}

1803 1804
/* Copy the originally attached classic BPF program back to user space.
 *
 * Returns the number of filter blocks on success, 0 if no filter is
 * attached, -EINVAL if @len is non-zero but smaller than the program,
 * -EFAULT if the copy to @ubuf fails. With @len == 0 only the number
 * of blocks is reported and @ubuf is not touched.
 */
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	/* Hold the socket lock so the filter cannot change under us. */
	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore.
	 */
	fprog = filter->orig_prog;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}