/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by SK_RUN_FILTER. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to SK_RUN_FILTER. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
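
/* Usage sketch (illustrative, not part of this file): how a receive
 * path might consult sk_filter() before queueing a packet. The
 * example_queue_rcv() helper is hypothetical.
 *
 *	static int example_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sk_filter(sk, skb);
 *
 *		if (err) {
 *			kfree_skb(skb);	// dropped by filter, or no memory
 *			return err;
 *		}
 *		return example_queue_rcv(sk, skb);
 *	}
 */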

static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}

static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

/* note that this only generates 32-bit random numbers */
static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return prandom_u32();
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(__get_random_u32);
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF instruction set to 'bpf_insn' style.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) Second pass to do the remapping itself; the conversion internally
 *    runs in two passes: the first finds the new jump offsets, the
 *    second does the remapping:
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 *
 * User BPF's register A is mapped to our BPF register 6, user BPF
 * register X is mapped to BPF register 7; the frame pointer is always
 * register 10; the context 'void *ctx' is stored in register 1, that is,
 * for socket filters: ctx == 'struct sk_buff *', for seccomp:
 * ctx == 'struct seccomp_data *'.
 */
int bpf_convert_filter(struct sock_filter *prog, int len,
		       struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	if (new_insn)
		*new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	new_insn++;

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)
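
		/* Worked example (illustrative): a classic JA with k = 1 at
		 * pc i targets insn i + 2. addrs[] absorbs any expansion of
		 * the insns in between, so the eBPF offset becomes
		 * addrs[i + 2] - addrs[i] - 1, minus the number of insns
		 * already emitted into tmp_insns before the jump itself.
		 */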

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_X;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K, RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
						BPF_K : BPF_X, BPF_REG_0,
						BPF_REG_A, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}
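
/* Worked example (illustrative, not from this file): the classic filter
 *
 *	{ BPF_LD | BPF_H | BPF_ABS, 0, 0, 12 },		// A = ethertype
 *	{ BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0x0800 },	// IPv4?
 *	{ BPF_RET | BPF_K, 0, 0, 0xffff },		// yes: accept
 *	{ BPF_RET | BPF_K, 0, 0, 0 },			// no: drop
 *
 * converts with A living in R6 (BPF_REG_A): the load maps as-is, the
 * conditional's jt/jf bytes become signed insn offsets via addrs[], and
 * each RET becomes a move into R0 followed by BPF_EXIT_INSN().
 */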

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell if not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
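
/* Illustrative example (not from this file): check_load_and_stores()
 * rejects a filter that reads a scratch cell before any store to it,
 * e.g.
 *
 *	{ BPF_LD | BPF_MEM, 0, 0, 1 },	// A = mem[1], never written
 *	{ BPF_RET | BPF_A, 0, 0, 0 },
 *
 * because bit 1 of memvalid is still clear when the load is checked.
 */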

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
{
	bool anc_found;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
EXPORT_SYMBOL(bpf_check_classic);
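
/* Illustrative example (not from this file): the smallest program that
 * passes bpf_check_classic() is a single RET, e.g. accept-everything:
 *
 *	struct sock_filter accept_all[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 *
 * A filter whose last insn is not a RET, or whose jumps reach past
 * flen, is rejected with -EINVAL.
 */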

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;
	fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 * 	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by bpf_prog_realloc().
		 */
		goto out_err_free;

	bpf_prog_select_runtime(fp);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = false;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
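
/* Usage sketch (illustrative, not from this file): building and running
 * an unattached classic filter from kernel code; 'skb' is assumed to
 * exist and error handling is trimmed for brevity.
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// accept all
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *
 *	if (bpf_prog_create(&prog, &fprog) == 0) {
 *		unsigned int res = SK_RUN_FILTER(prog, skb);
 *		bpf_prog_destroy(prog);
 *	}
 */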

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;
	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	prog = bpf_prog_alloc(bpf_fsize, 0);
	if (!prog)
		return -ENOMEM;

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return -EFAULT;
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return -ENOMEM;
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	prog = bpf_prepare_filter(prog);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
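
/* Userspace counterpart (sketch): sk_attach_filter() is reached through
 * setsockopt(SO_ATTACH_FILTER); the filter below is illustrative.
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(insns) / sizeof(insns[0]),
 *		.filter	= insns,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */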

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
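
/* Userspace counterpart (sketch): sk_attach_bpf() is reached through
 * setsockopt(SO_ATTACH_BPF) with an fd from the bpf(2) syscall for a
 * BPF_PROG_TYPE_SOCKET_FILTER program; attr setup omitted.
 *
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd));
 */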

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	default:
		return NULL;
	}
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type)
{
	/* only read is allowed */
	if (type != BPF_READ)
		return false;

	/* check bounds */
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	/* all __sk_buff fields are __u32 */
	if (size != 4)
		return false;

	return true;
}

static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
					struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, mark):
		return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);
1237 1238 1239 1240 1241 1242 1243 1244

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  dst_reg, src_reg, insn);
1245 1246 1247
	}

	return insn - insn_buf;
1248 1249
}

1250 1251 1252
static const struct bpf_verifier_ops sk_filter_ops = {
	.get_func_proto = sk_filter_func_proto,
	.is_valid_access = sk_filter_is_valid_access,
1253
	.convert_ctx_access = sk_filter_convert_ctx_access,
1254 1255
};

1256 1257
static struct bpf_prog_type_list sk_filter_type __read_mostly = {
	.ops = &sk_filter_ops,
1258 1259 1260
	.type = BPF_PROG_TYPE_SOCKET_FILTER,
};

static struct bpf_prog_type_list sched_cls_type __read_mostly = {
	.ops = &sk_filter_ops,
	.type = BPF_PROG_TYPE_SCHED_CLS,
};

static int __init register_sk_filter_ops(void)
{
	bpf_register_prog_type(&sk_filter_type);
	bpf_register_prog_type(&sched_cls_type);

	return 0;
}
late_initcall(register_sk_filter_ops);

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that was originally attached,
	 * so no conversion/decode needed anymore.
	 */
	fprog = filter->prog->orig_prog;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
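
/* Usage note (illustrative): via getsockopt(SO_GET_FILTER), userspace
 * typically calls twice: first with optlen 0 to learn the number of
 * filter blocks, then with a buffer of that many struct sock_filter
 * entries to fetch the originally attached program.
 */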