/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);

static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}

static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 *
 * User BPF's register A is mapped to our BPF register 6, user BPF
 * register X is mapped to BPF register 7; frame pointer is always
 * register 10; Context 'void *ctx' is stored in register 1, that is,
 * for socket filters: ctx == 'struct sk_buff *', for seccomp:
 * ctx == 'struct seccomp_data *'.
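 *
 * As an illustrative sketch (not part of the original comment, mirroring the
 * BPF_LD | BPF_W | BPF_LEN case handled below), a classic insn such as
 * BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0) is remapped to
 * BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, len)),
 * i.e. the implicit skb context of classic BPF becomes an explicit load
 * through BPF_REG_CTX.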
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	if (new_insn)
		*new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	new_insn++;

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)
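		/* Worked example (illustrative, not from the original source):
		 * if the classic insn at index i was placed at new index
		 * addrs[i] == 10 and its target at addrs[target] == 14, a Jxx
		 * emitted as the first insn of its group gets off = 14 - 10 - 1
		 * = 3; if it is the second emitted insn (insn - tmp_insns == 1),
		 * the extra subtraction yields off = 2, since the pc after the
		 * jump already sits one insn further along.
		 */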

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K, RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
						BPF_K : BPF_X, BPF_REG_0,
						BPF_REG_A, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that filters loaded by the user never try to read
 * a cell if not previously written, and we check all branches to be sure
 * a malicious user doesn't try to abuse us.
 */
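/* Illustrative example (not from the original source): a classic program
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),
 *	BPF_STMT(BPF_RET | BPF_A, 0)
 *
 * reads mem[0] before any BPF_ST/BPF_STX has written it, so the check below
 * rejects it with -EINVAL; the same load preceded by a store to slot 0 passes.
 */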
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
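 *
 * For instance (illustrative), a one-insn program consisting only of
 * BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0) is rejected here because it does
 * not end in BPF_RET, whereas appending BPF_STMT(BPF_RET | BPF_A, 0)
 * makes it pass this check.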
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 * 	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	bpf_prog_select_runtime(fp);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
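 *
 * Minimal usage sketch (illustrative only, error handling elided):
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	// accept whole packet
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len = ARRAY_SIZE(insns), .filter = insns,
 *	};
 *	struct bpf_prog *fp;
 *
 *	if (bpf_prog_create(&fp, &fprog) == 0) {
 *		... run fp via BPF_PROG_RUN(fp, skb) ...
 *		bpf_prog_destroy(fp);
 *	}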
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;
	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	prog = bpf_prog_alloc(bpf_fsize, 0);
	if (!prog)
		return -ENOMEM;

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return -EFAULT;
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return -ENOMEM;
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	prog = bpf_prepare_filter(prog, NULL);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

#define BPF_RECOMPUTE_CSUM(flags)	((flags) & 1)

static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	void *from = (void *) (long) r3;
	unsigned int len = (unsigned int) r4;
	char buf[16];
	void *ptr;

	/* bpf verifier guarantees that:
	 * 'from' pointer points to bpf program stack
	 * 'len' bytes of it were initialized
	 * 'len' > 0
	 * 'skb' is a valid pointer to 'struct sk_buff'
	 *
	 * so check for invalid 'offset' and too large 'len'
	 */
	if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, len, buf);
	if (unlikely(!ptr))
		return -EFAULT;

	if (BPF_RECOMPUTE_CSUM(flags))
		skb_postpull_rcsum(skb, ptr, len);

	memcpy(ptr, from, len);

	if (ptr == buf)
		/* skb_store_bits cannot return -EFAULT here */
		skb_store_bits(skb, offset, ptr, len);

	if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
	return 0;
}

const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

#define BPF_HEADER_FIELD_SIZE(flags)	((flags) & 0x0f)
#define BPF_IS_PSEUDO_HEADER(flags)	((flags) & 0x10)

static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (BPF_HEADER_FIELD_SIZE(flags)) {
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits guaranteed to not return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (BPF_HEADER_FIELD_SIZE(flags)) {
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits guaranteed to not return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

#define BPF_IS_REDIRECT_INGRESS(flags)	((flags) & 1)

static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!skb2))
		return -ENOMEM;

	if (BPF_IS_REDIRECT_INGRESS(flags))
		return dev_forward_skb(dev, skb2);

	skb2->dev = dev;
	skb_sender_cpu_clear(skb2);
	return dev_queue_xmit(skb2);
}

const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func           = bpf_clone_redirect,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_ANYTHING,
	.arg3_type      = ARG_ANYTHING,
};

struct redirect_info {
	u32 ifindex;
	u32 flags;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);
static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	ri->ifindex = ifindex;
	ri->flags = flags;
	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (BPF_IS_REDIRECT_INGRESS(ri->flags))
		return dev_forward_skb(dev, skb);

	skb->dev = dev;
	skb_sender_cpu_clear(skb);
	return dev_queue_xmit(skb);
}

const struct bpf_func_proto bpf_redirect_proto = {
	.func           = bpf_redirect,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_ANYTHING,
	.arg2_type      = ARG_ANYTHING,
};

static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return task_get_classid((struct sk_buff *) (unsigned long) r1);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func           = bpf_get_cgroup_classid,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
};

static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst((struct sk_buff *) (unsigned long) r1);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func           = bpf_get_route_realm,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
};

static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	__be16 vlan_proto = (__force __be16) r2;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	return skb_vlan_push(skb, vlan_proto, vlan_tci);
}

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func           = bpf_skb_vlan_push,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_ANYTHING,
	.arg3_type      = ARG_ANYTHING,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);

static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;

	return skb_vlan_pop(skb);
}

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func           = bpf_skb_vlan_pop,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);

bool bpf_helper_changes_skb_data(void *func)
{
	if (func == bpf_skb_vlan_push)
		return true;
	if (func == bpf_skb_vlan_pop)
		return true;
	return false;
}

static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
	struct ip_tunnel_info *info = skb_tunnel_info(skb);

	if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
		return -EINVAL;
	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);

	return 0;
}

const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static struct metadata_dst __percpu *md_dst;

static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	struct ip_tunnel_info *info;

	if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;
	info->key.tun_flags = TUNNEL_KEY;
	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);

	return 0;
}

const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
{
	if (!md_dst) {
		/* race is not possible, since it's called from
		 * verifier that is holding verifier mutex
		 */
		md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
		if (!md_dst)
			return NULL;
	}
	return &bpf_skb_set_tunnel_key_proto;
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_key_proto();
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	default:
		return sk_filter_func_proto(func_id);
	}
}

static bool __is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	/* all __sk_buff fields are __u32 */
	if (size != 4)
		return false;

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type)
{
	if (off == offsetof(struct __sk_buff, tc_classid))
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, cb[0]) ...
			offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}

	return __is_valid_access(off, size, type);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type)
{
	if (off == offsetof(struct __sk_buff, tc_classid))
		return type == BPF_WRITE ? true : false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, tc_index):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
			offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}
	return __is_valid_access(off, size, type);
}

static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				      int src_reg, int ctx_off,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_proto));
		break;

	case offsetof(struct __sk_buff, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, priority));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, skb_iif));
		break;

	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      dst_reg, src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
				      offsetof(struct net_device, ifindex));
		break;

	case offsetof(struct __sk_buff, hash):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, hash));
		break;

	case offsetof(struct __sk_buff, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, cb[0]) ...
		offsetof(struct __sk_buff, cb[4]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);

		prog->cb_access = 1;
		ctx_off -= offsetof(struct __sk_buff, cb[0]);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		ctx_off -= offsetof(struct __sk_buff, tc_classid);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
		WARN_ON(type != BPF_WRITE);
		*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		break;
#else
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(dst_reg, 0);
		break;
#endif
	}

	return insn - insn_buf;
}

static const struct bpf_verifier_ops sk_filter_ops = {
	.get_func_proto = sk_filter_func_proto,
	.is_valid_access = sk_filter_is_valid_access,
	.convert_ctx_access = bpf_net_convert_ctx_access,
};

static const struct bpf_verifier_ops tc_cls_act_ops = {
	.get_func_proto = tc_cls_act_func_proto,
	.is_valid_access = tc_cls_act_is_valid_access,
	.convert_ctx_access = bpf_net_convert_ctx_access,
};

static struct bpf_prog_type_list sk_filter_type __read_mostly = {
	.ops = &sk_filter_ops,
	.type = BPF_PROG_TYPE_SOCKET_FILTER,
};

static struct bpf_prog_type_list sched_cls_type __read_mostly = {
	.ops = &tc_cls_act_ops,
	.type = BPF_PROG_TYPE_SCHED_CLS,
};

static struct bpf_prog_type_list sched_act_type __read_mostly = {
	.ops = &tc_cls_act_ops,
	.type = BPF_PROG_TYPE_SCHED_ACT,
};

static int __init register_sk_filter_ops(void)
{
	bpf_register_prog_type(&sk_filter_type);
	bpf_register_prog_type(&sched_cls_type);
	bpf_register_prog_type(&sched_act_type);

	return 0;
}
late_initcall(register_sk_filter_ops);

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}