filter.c 75.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
4 5
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
L
Linus Torvalds 已提交
6
 *
7 8 9 10 11 12 13
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
L
Linus Torvalds 已提交
14 15 16 17 18 19 20
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
21
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
L
Linus Torvalds 已提交
22 23 24 25 26 27 28 29 30 31 32
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
33
#include <linux/gfp.h>
L
Linus Torvalds 已提交
34 35
#include <net/ip.h>
#include <net/protocol.h>
36
#include <net/netlink.h>
L
Linus Torvalds 已提交
37 38
#include <linux/skbuff.h>
#include <net/sock.h>
39
#include <net/flow_dissector.h>
L
Linus Torvalds 已提交
40 41 42
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
43
#include <asm/unaligned.h>
L
Linus Torvalds 已提交
44
#include <linux/filter.h>
45
#include <linux/ratelimit.h>
46
#include <linux/seccomp.h>
E
Eric Dumazet 已提交
47
#include <linux/if_vlan.h>
48
#include <linux/bpf.h>
49
#include <net/sch_generic.h>
50
#include <net/cls_cgroup.h>
51
#include <net/dst_metadata.h>
52
#include <net/dst.h>
53
#include <net/sock_reuseport.h>
L
Linus Torvalds 已提交
54

S
Stephen Hemminger 已提交
55
/**
56
 *	sk_filter_trim_cap - run a packet through a socket filter
S
Stephen Hemminger 已提交
57 58
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
59
 *	@cap: limit on how short the eBPF program may trim the packet
S
Stephen Hemminger 已提交
60
 *
61 62
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
S
Stephen Hemminger 已提交
63
 * than pkt_len we keep whole skb->data. This is the socket level
64
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
S
Stephen Hemminger 已提交
65 66 67
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
68
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
S
Stephen Hemminger 已提交
69 70 71 72
{
	int err;
	struct sk_filter *filter;

73 74 75 76 77 78 79 80
	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

S
Stephen Hemminger 已提交
81 82 83 84
	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

85 86
	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
S
Stephen Hemminger 已提交
87
	if (filter) {
88
		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
89
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
S
Stephen Hemminger 已提交
90
	}
91
	rcu_read_unlock();
S
Stephen Hemminger 已提交
92 93 94

	return err;
}
95
EXPORT_SYMBOL(sk_filter_trim_cap);
S
Stephen Hemminger 已提交
96

97
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
98
{
99
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
100 101
}

102
static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
103
{
104
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
105 106 107 108 109
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

110 111 112
	if (skb->len < sizeof(struct nlattr))
		return 0;

113
	if (a > skb->len - sizeof(struct nlattr))
114 115
		return 0;

116
	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
117 118 119 120 121 122
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

123
static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
124
{
125
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
126 127 128 129 130
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

131 132 133
	if (skb->len < sizeof(struct nlattr))
		return 0;

134
	if (a > skb->len - sizeof(struct nlattr))
135 136
		return 0;

137 138
	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
139 140
		return 0;

141
	nla = nla_find_nested(nla, x);
142 143 144 145 146 147
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

148
static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
149 150 151 152
{
	return raw_smp_processor_id();
}

153 154 155 156 157 158
static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;
186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
205 206 207 208 209
	}

	return insn - insn_buf;
}

210
static bool convert_bpf_extensions(struct sock_filter *fp,
211
				   struct bpf_insn **insnp)
212
{
213
	struct bpf_insn *insn = *insnp;
214
	u32 cnt;
215 216 217

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
218 219 220 221 222 223 224
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
225 226 227
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
228 229
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
230 231 232 233 234 235
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
236 237 238 239 240 241 242 243 244 245 246 247 248 249
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
250 251 252
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
253 254
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
255 256 257 258 259
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

260 261
		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
262 263 264
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
265 266
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
267 268 269
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
270 271 272 273
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;
274

275 276 277 278
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
279 280
		break;

281 282 283 284 285 286 287 288 289 290
	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

291 292 293 294
	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
C
Chema Gonzalez 已提交
295
	case SKF_AD_OFF + SKF_AD_RANDOM:
296
		/* arg1 = CTX */
297
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
298
		/* arg2 = A */
299
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
300
		/* arg3 = X */
301
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
302
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
303 304
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
305
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
306 307
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
308
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
309 310
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
311
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
312 313
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
314
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
315
			break;
C
Chema Gonzalez 已提交
316
		case SKF_AD_OFF + SKF_AD_RANDOM:
317 318
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
C
Chema Gonzalez 已提交
319
			break;
320 321 322 323
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
324 325
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

/**
342
 *	bpf_convert_filter - convert filter program
343 344 345 346 347 348 349 350 351
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
352
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
353 354 355
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
356
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
357
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
358
 */
359 360
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
361 362
{
	int new_flen = 0, pass = 0, target, i;
363
	struct bpf_insn *new_insn;
364 365 366 367 368
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
369
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
370

371
	if (len <= 0 || len > BPF_MAXINSNS)
372 373 374
		return -EINVAL;

	if (new_prog) {
375 376
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
377 378 379 380 381 382 383 384
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400
	/* Classic BPF related prologue emission. */
	if (new_insn) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}
401 402

	for (i = 0; i < len; fp++, i++) {
403 404
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;
405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

447
			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
448 449
			break;

450 451 452 453 454 455 456
		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
457 458 459 460 461 462 463 464
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

465 466 467 468
		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
469 470 471 472 473 474 475 476 477 478 479 480 481 482 483
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
484
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
485

486 487
				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
488 489
				bpf_src = BPF_X;
			} else {
490
				insn->dst_reg = BPF_REG_A;
491 492
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
493
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
L
Linus Torvalds 已提交
494
			}
495 496 497 498 499

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
500
				BPF_EMIT_JMP;
501
				break;
L
Linus Torvalds 已提交
502
			}
503 504 505 506 507

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
508
				BPF_EMIT_JMP;
509
				break;
510
			}
511 512 513 514

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
515
			BPF_EMIT_JMP;
516 517 518 519
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
520
			BPF_EMIT_JMP;
521 522 523 524
			break;

		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
525
			/* tmp = A */
526
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
527
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
528
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
529
			/* A &= 0xf */
530
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
531
			/* A <<= 2 */
532
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
533
			/* X = A */
534
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
535
			/* A = tmp */
536
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
537 538
			break;

539 540 541
		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
542 543
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
544 545 546
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
547
			*insn = BPF_EXIT_INSN();
548 549 550 551 552
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
553 554 555
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
556 557 558 559 560
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
561 562 563
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
564 565 566 567 568
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
569 570
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
571 572 573 574
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
575
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
576 577 578 579
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
580
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
581 582 583 584 585
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
586 587 588
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
589 590
			break;

591
		/* Access seccomp_data fields. */
592
		case BPF_LDX | BPF_ABS | BPF_W:
593 594
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
595 596
			break;

S
Stephen Hemminger 已提交
597
		/* Unknown instruction. */
L
Linus Torvalds 已提交
598
		default:
599
			goto err;
L
Linus Torvalds 已提交
600
		}
601 602 603 604 605 606

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
L
Linus Torvalds 已提交
607 608
	}

609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624
	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
L
Linus Torvalds 已提交
625
	return 0;
626 627 628
err:
	kfree(addrs);
	return -EINVAL;
L
Linus Torvalds 已提交
629 630
}

631 632
/* Security:
 *
633
 * As we dont want to clear mem[] array for each packet going through
L
Li RongQing 已提交
634
 * __bpf_prog_run(), we check that filter loaded by user never try to read
635
 * a cell if not previously written, and we check all branches to be sure
L
Lucas De Marchi 已提交
636
 * a malicious user doesn't try to abuse us.
637
 */
638
static int check_load_and_stores(const struct sock_filter *filter, int flen)
639
{
640
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
641 642 643
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
644

645
	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
646 647
	if (!masks)
		return -ENOMEM;
648

649 650 651 652 653 654
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
655 656
		case BPF_ST:
		case BPF_STX:
657 658
			memvalid |= (1 << filter[pc].k);
			break;
659 660
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
661 662 663 664 665
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
666 667
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
668 669 670
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
671 672 673 674 675 676 677 678 679
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
680 681 682 683 684 685 686 687 688 689 690
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

757 758 759 760 761 762 763 764 765 766 767
static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

L
Linus Torvalds 已提交
768
/**
769
 *	bpf_check_classic - verify socket filter code
L
Linus Torvalds 已提交
770 771 772 773 774
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
775 776
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
L
Linus Torvalds 已提交
777
 *
778 779 780
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
L
Linus Torvalds 已提交
781
 */
782 783
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
L
Linus Torvalds 已提交
784
{
785
	bool anc_found;
786
	int pc;
L
Linus Torvalds 已提交
787

788
	/* Check the filter code now */
L
Linus Torvalds 已提交
789
	for (pc = 0; pc < flen; pc++) {
790
		const struct sock_filter *ftest = &filter[pc];
791

792 793
		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
794
			return -EINVAL;
795

796
		/* Some instructions need special checks */
797 798 799 800
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
E
Eric Dumazet 已提交
801 802 803
			if (ftest->k == 0)
				return -EINVAL;
			break;
R
Rabin Vincent 已提交
804 805 806 807 808
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
809 810 811 812 813
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
814 815 816
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
817 818
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
819 820 821
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
822
			if (ftest->k >= (unsigned int)(flen - pc - 1))
823
				return -EINVAL;
824
			break;
825 826 827 828 829 830 831 832 833
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
834
			if (pc + ftest->jt + 1 >= flen ||
835 836
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
837
			break;
838 839 840
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
841
			anc_found = false;
842 843 844
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
845 846
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
847 848
		}
	}
849

850
	/* Last instruction must be a RET code */
851
	switch (filter[flen - 1].code) {
852 853
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
854
		return check_load_and_stores(filter, flen);
855
	}
856

857
	return -EINVAL;
L
Linus Torvalds 已提交
858 859
}

860 861
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
862
{
863
	unsigned int fsize = bpf_classic_proglen(fprog);
864 865 866 867 868 869 870 871
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;
872 873 874

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
875 876 877 878 879 880 881 882
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

883
static void bpf_release_orig_filter(struct bpf_prog *fp)
884 885 886 887 888 889 890 891 892
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

893 894
static void __bpf_prog_release(struct bpf_prog *prog)
{
895
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
896 897 898 899 900
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
901 902
}

903 904
static void __sk_filter_release(struct sk_filter *fp)
{
905 906
	__bpf_prog_release(fp->prog);
	kfree(fp);
907 908
}

909
/**
E
Eric Dumazet 已提交
910
 * 	sk_filter_release_rcu - Release a socket filter by rcu_head
911 912
 *	@rcu: rcu_head that contains the sk_filter to free
 */
913
static void sk_filter_release_rcu(struct rcu_head *rcu)
914 915 916
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

917
	__sk_filter_release(fp);
918
}
919 920 921 922 923 924 925 926 927 928 929 930 931 932 933

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
934
	u32 filter_size = bpf_prog_size(fp->prog->len);
935

936 937
	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
938
}
939

940 941 942 943
/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
944
{
945
	u32 filter_size = bpf_prog_size(fp->prog->len);
946 947 948 949 950 951 952

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
953
	}
954
	return false;
955 956
}

957
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
958 959
{
	struct sock_filter *old_prog;
960
	struct bpf_prog *old_fp;
961
	int err, new_len, old_len = fp->len;
962 963 964 965 966 967 968

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
969
		     sizeof(struct bpf_insn));
970 971 972 973 974 975

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
976
			   GFP_KERNEL | __GFP_NOWARN);
977 978 979 980 981 982
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
983
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
984 985 986 987 988
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
989
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
990 991 992 993 994 995 996 997 998 999 1000
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

1001
	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
1002
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
1003
	if (err)
1004
		/* 2nd bpf_convert_filter() can fail only if it fails
1005 1006
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
1007
		 * by krealloc().
1008 1009 1010
		 */
		goto out_err_free;

1011 1012 1013 1014 1015
	/* We are guaranteed to never error here with cBPF to eBPF
	 * transitions, since there's no issue with type compatibility
	 * checks on program arrays.
	 */
	fp = bpf_prog_select_runtime(fp, &err);
1016

1017 1018 1019 1020 1021 1022
	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
1023
	__bpf_prog_release(fp);
1024 1025 1026
	return ERR_PTR(err);
}

1027 1028
static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
1029 1030 1031
{
	int err;

1032
	fp->bpf_func = NULL;
1033
	fp->jited = 0;
1034

1035
	err = bpf_check_classic(fp->insns, fp->len);
1036
	if (err) {
1037
		__bpf_prog_release(fp);
1038
		return ERR_PTR(err);
1039
	}
1040

1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051
	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

1052 1053 1054
	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
1055
	bpf_jit_compile(fp);
1056 1057 1058 1059

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
1060
	if (!fp->jited)
1061
		fp = bpf_migrate_filter(fp);
1062 1063

	return fp;
1064 1065 1066
}

/**
1067
 *	bpf_prog_create - create an unattached filter
R
Randy Dunlap 已提交
1068
 *	@pfp: the unattached filter that is created
1069
 *	@fprog: the filter program
1070
 *
R
Randy Dunlap 已提交
1071
 * Create a filter independent of any socket. We first run some
1072 1073 1074 1075
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
1076
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
1077
{
1078
	unsigned int fsize = bpf_classic_proglen(fprog);
1079
	struct bpf_prog *fp;
1080 1081

	/* Make sure new filter is there and in the right amounts. */
1082
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1083 1084
		return -EINVAL;

1085
	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1086 1087
	if (!fp)
		return -ENOMEM;
1088

1089 1090 1091
	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
1092 1093 1094 1095 1096
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;
1097

1098
	/* bpf_prepare_filter() already takes care of freeing
1099 1100
	 * memory in case something goes wrong.
	 */
1101
	fp = bpf_prepare_filter(fp, NULL);
1102 1103
	if (IS_ERR(fp))
		return PTR_ERR(fp);
1104 1105 1106 1107

	*pfp = fp;
	return 0;
}
1108
EXPORT_SYMBOL_GPL(bpf_prog_create);
1109

1110 1111 1112 1113 1114
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
1115
 *	@save_orig: save classic BPF program
1116 1117 1118 1119 1120 1121
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1122
			      bpf_aux_classic_check_t trans, bool save_orig)
1123 1124 1125
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
1126
	int err;
1127 1128

	/* Make sure new filter is there and in the right amounts. */
1129
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

1144 1145 1146 1147 1148 1149 1150 1151
	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

1152 1153 1154 1155 1156 1157 1158 1159 1160 1161
	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
1162
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
1163

1164
void bpf_prog_destroy(struct bpf_prog *fp)
1165
{
1166
	__bpf_prog_release(fp);
1167
}
1168
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1169

1170
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;
	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}

1186 1187
	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
1188
	rcu_assign_pointer(sk->sk_filter, fp);
1189

1190 1191 1192 1193 1194 1195
	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

1196 1197 1198 1199 1200 1201 1202 1203
static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct bpf_prog *old_prog;
	int err;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		return -ENOMEM;

1204
	if (sk_unhashed(sk) && sk->sk_reuseport) {
1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221
		err = reuseport_alloc(sk);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	old_prog = reuseport_attach_prog(sk, prog);
	if (old_prog)
		bpf_prog_destroy(old_prog);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
L
Linus Torvalds 已提交
1222
{
1223
	unsigned int fsize = bpf_classic_proglen(fprog);
1224
	struct bpf_prog *prog;
L
Linus Torvalds 已提交
1225 1226
	int err;

1227
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
1228
		return ERR_PTR(-EPERM);
1229

L
Linus Torvalds 已提交
1230
	/* Make sure new filter is there and in the right amounts. */
1231
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1232
		return ERR_PTR(-EINVAL);
L
Linus Torvalds 已提交
1233

1234
	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1235
	if (!prog)
1236
		return ERR_PTR(-ENOMEM);
1237

1238
	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1239
		__bpf_prog_free(prog);
1240
		return ERR_PTR(-EFAULT);
L
Linus Torvalds 已提交
1241 1242
	}

1243
	prog->len = fprog->len;
L
Linus Torvalds 已提交
1244

1245
	err = bpf_prog_store_orig_filter(prog, fprog);
1246
	if (err) {
1247
		__bpf_prog_free(prog);
1248
		return ERR_PTR(-ENOMEM);
1249 1250
	}

1251
	/* bpf_prepare_filter() already takes care of freeing
1252 1253
	 * memory in case something goes wrong.
	 */
1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
1267
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1268 1269 1270 1271
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

1272 1273 1274
	if (IS_ERR(prog))
		return PTR_ERR(prog);

1275
	err = __sk_attach_prog(prog, sk);
1276
	if (err < 0) {
1277
		__bpf_prog_release(prog);
1278
		return err;
1279 1280
	}

1281
	return 0;
L
Linus Torvalds 已提交
1282
}
1283
EXPORT_SYMBOL_GPL(sk_attach_filter);
L
Linus Torvalds 已提交
1284

1285
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1286
{
1287
	struct bpf_prog *prog = __get_filter(fprog, sk);
1288
	int err;
1289

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
1304
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
1305
		return ERR_PTR(-EPERM);
1306

1307
	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

1318
	err = __sk_attach_prog(prog, sk);
1319
	if (err < 0) {
1320
		bpf_prog_put(prog);
1321
		return err;
1322 1323 1324 1325 1326
	}

	return 0;
}

1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

1344 1345 1346 1347 1348 1349 1350 1351
struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
1352

1353 1354 1355 1356 1357 1358
static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

1359 1360 1361
static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
1362
	int err = __bpf_try_make_writable(skb, write_len);
1363

1364
	bpf_compute_data_end(skb);
1365 1366 1367
	return err;
}

1368 1369 1370 1371 1372 1373
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

1374 1375 1376 1377 1378 1379
static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

1380
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
1381 1382
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
1383
	unsigned int offset = (unsigned int) r2;
1384 1385 1386 1387
	void *from = (void *) (long) r3;
	unsigned int len = (unsigned int) r4;
	void *ptr;

1388
	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1389
		return -EINVAL;
1390
	if (unlikely(offset > 0xffff))
1391
		return -EFAULT;
1392
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
1393 1394
		return -EFAULT;

1395
	ptr = skb->data + offset;
1396
	if (flags & BPF_F_RECOMPUTE_CSUM)
1397
		__skb_postpull_rcsum(skb, ptr, len, offset);
1398 1399 1400

	memcpy(ptr, from, len);

1401
	if (flags & BPF_F_RECOMPUTE_CSUM)
1402
		__skb_postpush_rcsum(skb, ptr, len, offset);
1403 1404
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);
1405

1406 1407 1408
	return 0;
}

1409
static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1410 1411 1412 1413 1414 1415 1416
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE,
1417 1418 1419
	.arg5_type	= ARG_ANYTHING,
};

1420 1421 1422
static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
1423
	unsigned int offset = (unsigned int) r2;
1424 1425 1426 1427
	void *to = (void *)(unsigned long) r3;
	unsigned int len = (unsigned int) r4;
	void *ptr;

1428
	if (unlikely(offset > 0xffff))
1429
		goto err_clear;
1430 1431 1432

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
1433
		goto err_clear;
1434 1435 1436 1437
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
1438 1439 1440
err_clear:
	memset(to, 0, len);
	return -EFAULT;
1441 1442
}

1443
static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1444 1445 1446 1447 1448
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
1449
	.arg3_type	= ARG_PTR_TO_RAW_STACK,
1450 1451 1452
	.arg4_type	= ARG_CONST_STACK_SIZE,
};

1453
static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
1454 1455
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
1456 1457
	unsigned int offset = (unsigned int) r2;
	__sum16 *ptr;
1458

1459 1460
	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
1461
	if (unlikely(offset > 0xffff || offset & 1))
1462
		return -EFAULT;
1463
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1464 1465
		return -EFAULT;

1466
	ptr = (__sum16 *)(skb->data + offset);
1467
	switch (flags & BPF_F_HDR_FIELD_MASK) {
1468 1469 1470 1471 1472 1473
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

1487
static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

1498
static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
1499 1500
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
1501
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1502
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1503 1504
	unsigned int offset = (unsigned int) r2;
	__sum16 *ptr;
1505

1506 1507
	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
			       BPF_F_HDR_FIELD_MASK)))
1508
		return -EINVAL;
1509
	if (unlikely(offset > 0xffff || offset & 1))
1510
		return -EFAULT;
1511
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1512 1513
		return -EFAULT;

1514
	ptr = (__sum16 *)(skb->data + offset);
1515 1516
	if (is_mmzero && !*ptr)
		return 0;
1517

1518
	switch (flags & BPF_F_HDR_FIELD_MASK) {
1519 1520 1521 1522 1523 1524
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

1535 1536
	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
1537 1538 1539
	return 0;
}

1540
static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1541 1542 1543 1544 1545 1546 1547 1548
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
1549 1550
};

1551 1552
static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
{
1553
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578
	u64 diff_size = from_size + to_size;
	__be32 *from = (__be32 *) (long) r1;
	__be32 *to   = (__be32 *) (long) r3;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

1579
static const struct bpf_func_proto bpf_csum_diff_proto = {
1580 1581 1582 1583 1584 1585 1586 1587 1588 1589
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

	return ret;
}

1614 1615
static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
{
1616
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
1617 1618
	struct net_device *dev;

1619 1620 1621
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

1622 1623 1624 1625
	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

1626 1627
	skb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!skb))
1628 1629
		return -ENOMEM;

1630 1631
	bpf_push_mac_rcsum(skb);

1632 1633
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1634 1635
}

1636
static const struct bpf_func_proto bpf_clone_redirect_proto = {
1637 1638 1639 1640 1641 1642 1643 1644
	.func           = bpf_clone_redirect,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_ANYTHING,
	.arg3_type      = ARG_ANYTHING,
};

1645 1646 1647 1648 1649 1650
struct redirect_info {
	u32 ifindex;
	u32 flags;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);
1651

1652 1653 1654 1655
static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

1656 1657 1658
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

1659 1660
	ri->ifindex = ifindex;
	ri->flags = flags;
1661

1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676
	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

1677 1678
	bpf_push_mac_rcsum(skb);

1679 1680
	return ri->flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1681 1682
}

1683
static const struct bpf_func_proto bpf_redirect_proto = {
1684 1685 1686 1687 1688 1689 1690
	.func           = bpf_redirect,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_ANYTHING,
	.arg2_type      = ARG_ANYTHING,
};

1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702
static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return task_get_classid((struct sk_buff *) (unsigned long) r1);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func           = bpf_get_cgroup_classid,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
};

1703 1704
static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
1705
	return dst_tclassid((struct sk_buff *) (unsigned long) r1);
1706 1707 1708 1709 1710 1711 1712 1713 1714
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func           = bpf_get_route_realm,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
};

1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731
static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash((struct sk_buff *) (unsigned long) r1);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

1732 1733 1734 1735
static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	__be16 vlan_proto = (__force __be16) r2;
1736
	int ret;
1737 1738 1739 1740 1741

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

1742
	bpf_push_mac_rcsum(skb);
1743
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
1744 1745
	bpf_pull_mac_rcsum(skb);

1746 1747
	bpf_compute_data_end(skb);
	return ret;
1748 1749 1750 1751 1752 1753 1754 1755 1756 1757
}

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func           = bpf_skb_vlan_push,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_ANYTHING,
	.arg3_type      = ARG_ANYTHING,
};
1758
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
1759 1760 1761 1762

static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
1763
	int ret;
1764

1765
	bpf_push_mac_rcsum(skb);
1766
	ret = skb_vlan_pop(skb);
1767 1768
	bpf_pull_mac_rcsum(skb);

1769 1770
	bpf_compute_data_end(skb);
	return ret;
1771 1772 1773 1774 1775 1776 1777 1778
}

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func           = bpf_skb_vlan_pop,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
};
1779
EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
1780

1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb->network_header - skb->mac_header;
	int ret;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to
		 * be changed into SKB_GSO_TCPV6.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV6;
		}

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_shinfo(skb)->gso_size -= len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb->network_header - skb->mac_header;
	int ret;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to
		 * be changed into SKB_GSO_TCPV4.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_shinfo(skb)->gso_size += len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	      to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	      to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}

static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	__be16 proto = (__force __be16) r2;
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. E.g. even if we passed buffers here,
	 * the program would still need to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we also keep separation of
	 * concerns, since e.g. bpf_skb_store_bytes() should only take
	 * care of stores.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_end(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func		= bpf_skb_change_proto,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
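
/* Usage sketch (comment only): a NAT64-style tc program could switch the
 * skb from IPv4 to IPv6 framing and then rewrite the headers itself.
 * Illustrative fragment, assuming the usual eBPF helper wrappers:
 *
 *	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *
 * Afterwards the program still has to fill in the new IPv6 header via
 * bpf_skb_store_bytes() and fix up the L4 checksum via
 * bpf_l4_csum_replace(), as described in the comment above.
 */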

static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	u32 pkt_type = r2;

	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func		= bpf_skb_change_type,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
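
/* Usage sketch (comment only): re-classify a packet, e.g. mark it as
 * locally destined. Illustrative, assuming linux/if_packet.h for the
 * PACKET_* values:
 *
 *	bpf_skb_change_type(skb, PACKET_HOST);
 *
 * Only the PACKET_HOST/BROADCAST/MULTICAST/OTHERHOST range passes the
 * skb_pkt_type_ok() checks above.
 */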

static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev->mtu + skb->dev->hard_header_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}

static u64 bpf_skb_change_tail(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(long) r1;
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	u32 new_len = (u32) r2;
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns, e.g. bpf_skb_store_bytes() should remain
	 * the only helper responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and dropping offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_end(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
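
/* Usage sketch (comment only): shrink a reply to a fixed size, or grow it
 * with zeroed tail room, before writing the payload with
 * bpf_skb_store_bytes(). Illustrative; the 64 byte target length is made
 * up:
 *
 *	if (bpf_skb_change_tail(skb, 64, 0))
 *		return TC_ACT_SHOT;
 *
 * The flags argument must currently be zero and the new length is bounded
 * by __bpf_skb_min_len()/__bpf_skb_max_len() above.
 */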

bool bpf_helper_changes_skb_data(void *func)
{
	if (func == bpf_skb_vlan_push)
		return true;
	if (func == bpf_skb_vlan_pop)
		return true;
	if (func == bpf_skb_store_bytes)
		return true;
	if (func == bpf_skb_change_proto)
		return true;
	if (func == bpf_skb_change_tail)
		return true;
	if (func == bpf_l3_csum_replace)
		return true;
	if (func == bpf_l4_csum_replace)
		return true;

	return false;
}
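
/* The verifier consults the list above to invalidate packet pointers after
 * a helper call that may move or reallocate skb data. A program therefore
 * has to reload and re-check skb->data/data_end afterwards, roughly
 * (illustrative sketch only):
 *
 *	bpf_skb_vlan_pop(skb);
 *	data     = (void *)(long)skb->data;
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_SHOT;
 */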

static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

static u64 bpf_skb_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
				u64 meta_size)
{
	struct sk_buff *skb = (struct sk_buff *)(long) r1;
	struct bpf_map *map = (struct bpf_map *)(long) r2;
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
	void *meta = (void *)(long) r4;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
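
/* Usage sketch (comment only): stream per-packet metadata plus the first
 * payload bytes to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
 * Illustrative; "events" and "struct meta" are made up. The upper 32 bits
 * of the flags (BPF_F_CTXLEN_MASK) select how many skb bytes to append:
 *
 *	struct meta m = { .ifindex = skb->ifindex };
 *	u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *
 *	bpf_perf_event_output(skb, &events, flags, &m, sizeof(m));
 */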

static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_RAW_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
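
/* Usage sketch (comment only): on ingress from a collect_md tunnel device,
 * read the received outer tunnel key. Illustrative fragment; error
 * handling is reduced to a drop:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *		return TC_ACT_SHOT;
 *
 * key.tunnel_id and key.remote_ipv4 (or remote_ipv6 with
 * BPF_F_TUNINFO_IPV6) then describe the decapsulated outer header.
 */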

static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	u8 *to = (u8 *) (long) r2;
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_RAW_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
};

static struct metadata_dst __percpu *md_dst;

static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (struct bpf_tunnel_key *)compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
		if (flags & BPF_F_ZERO_CSUM_TX)
			info->key.tun_flags &= ~TUNNEL_CSUM;
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
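
/* Usage sketch (comment only): on egress towards a collect_md tunnel
 * device (VXLAN/Geneve/GRE in external mode), the program picks the outer
 * encapsulation per packet. Illustrative fragment; the id, TTL and
 * destination are made up, and remote_ipv4 is given in host byte order:
 *
 *	struct bpf_tunnel_key key = {
 *		.tunnel_id   = 42,
 *		.remote_ipv4 = 0xac100164,
 *		.tunnel_ttl  = 64,
 *	};
 *
 *	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *				   BPF_F_ZERO_CSUM_TX))
 *		return TC_ACT_SHOT;
 */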

static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	u8 *from = (u8 *) (long) r2;
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
};
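
/* Usage sketch (comment only): tunnel options (e.g. Geneve TLVs or the
 * VXLAN-GBP extension) are passed as an opaque, 4-byte aligned blob next
 * to the tunnel key; the key has to be set first so that the metadata dst
 * is in place. Illustrative fragment with a made-up 8 byte option buffer:
 *
 *	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 *	bpf_skb_set_tunnel_opt(skb, opt, sizeof(opt));
 *
 * On ingress, bpf_skb_get_tunnel_opt() copies the options out and returns
 * their length.
 */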

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		/* Race is not possible, since it's called from verifier
		 * that is holding verifier mutex.
		 */
		md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						   GFP_KERNEL);
		if (!md_dst)
			return NULL;
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}

static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(long)r1;
	struct bpf_map *map = (struct bpf_map *)(long)r2;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;
	u32 i = (u32)r3;

	sk = skb->sk;
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;

	if (unlikely(i >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[i]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
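
/* Usage sketch (comment only): test whether the skb's (full) socket sits
 * inside a cgroup hierarchy that user space stashed in a
 * BPF_MAP_TYPE_CGROUP_ARRAY map. Illustrative; map name and index are
 * made up:
 *
 *	ret = bpf_skb_under_cgroup(skb, &cgroup_map, 0);
 *	if (ret == 1)
 *		return TC_ACT_OK;
 *
 * A return of 0 means "not in the hierarchy", a negative value an error
 * (no full socket, index out of range, empty slot).
 */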

static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
				u64 meta_size)
{
	struct xdp_buff *xdp = (struct xdp_buff *)(long) r1;
	struct bpf_map *map = (struct bpf_map *)(long) r2;
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
	void *meta = (void *)(long) r4;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size,
				bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
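
/* Usage sketch (comment only): the XDP flavour mirrors the skb variant
 * above, with the appended packet bytes bounded by data_end - data.
 * Illustrative; "events", "meta" and the 64 byte sample are made up:
 *
 *	u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *
 *	bpf_perf_event_output(xdp, &events, flags, &meta, sizeof(meta));
 */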

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return sk_filter_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	default:
		return sk_filter_func_proto(func_id);
	}
}

static bool __is_valid_access(int off, int size, enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      enum bpf_reg_type *reg_type)
{
	switch (off) {
	case offsetof(struct __sk_buff, tc_classid):
	case offsetof(struct __sk_buff, data):
	case offsetof(struct __sk_buff, data_end):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}

	return __is_valid_access(off, size, type);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, tc_index):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]):
		case offsetof(struct __sk_buff, tc_classid):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_access(off, size, type);
}

static bool __is_valid_xdp_access(int off, int size,
				  enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size, type);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
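
/* For reference, an XDP program is expected to return one of XDP_ABORTED,
 * XDP_DROP, XDP_PASS or XDP_TX; anything else triggers the warning above
 * and is treated as a drop by the driver. Minimal illustrative program,
 * assuming the usual eBPF toolchain headers:
 *
 *	SEC("xdp")
 *	int xdp_pass_all(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 */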

static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				      int src_reg, int ctx_off,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_proto));
		break;

	case offsetof(struct __sk_buff, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, priority));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, skb_iif));
		break;

	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      dst_reg, src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
				      offsetof(struct net_device, ifindex));
		break;

	case offsetof(struct __sk_buff, hash):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, hash));
		break;

	case offsetof(struct __sk_buff, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);

		prog->cb_access = 1;
		ctx_off -= offsetof(struct __sk_buff, cb[0]);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		ctx_off -= offsetof(struct __sk_buff, tc_classid);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
				      dst_reg, src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_end):
		ctx_off -= offsetof(struct __sk_buff, data_end);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
				      dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		break;
#else
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(dst_reg, 0);
		break;
#endif
	}

	return insn - insn_buf;
}
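
/* Example of what the conversion above does (comment only): a program's
 * context load such as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, mark))
 *
 * is rewritten at verification time into a load from the real skb,
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, mark))
 *
 * while fields without a 1:1 backing member (pkt_type, vlan_tci, ...) are
 * expanded through convert_skb_access() into several instructions.
 */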

static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				  int src_reg, int ctx_off,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data)),
				      dst_reg, src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data_end)),
				      dst_reg, src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	}

	return insn - insn_buf;
}

static const struct bpf_verifier_ops sk_filter_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_net_convert_ctx_access,
};

static const struct bpf_verifier_ops tc_cls_act_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= bpf_net_convert_ctx_access,
};

static const struct bpf_verifier_ops xdp_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

static struct bpf_prog_type_list sk_filter_type __read_mostly = {
	.ops	= &sk_filter_ops,
	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
};

static struct bpf_prog_type_list sched_cls_type __read_mostly = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_CLS,
};

static struct bpf_prog_type_list sched_act_type __read_mostly = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_ACT,
};

static struct bpf_prog_type_list xdp_type __read_mostly = {
	.ops	= &xdp_ops,
	.type	= BPF_PROG_TYPE_XDP,
};

static int __init register_sk_filter_ops(void)
{
	bpf_register_prog_type(&sk_filter_type);
	bpf_register_prog_type(&sched_cls_type);
	bpf_register_prog_type(&sched_act_type);
	bpf_register_prog_type(&xdp_type);

	return 0;
}
late_initcall(register_sk_filter_ops);

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}