/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/random.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}
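/* The EMIT* macros below pack up to four opcode bytes into one u32 and let
 * emit_code() store them; since x86 is little-endian, the low byte of 'bytes'
 * becomes the first instruction byte.  For example, EMIT3(0x48, 0x89, 0xE5)
 * emits the byte sequence 48 89 e5, i.e. 'mov rbp,rsp'.
 */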

#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
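/* e.g. the near (rel32) form of X86_JE (0x74) is the two-byte opcode 0x0F 0x84 */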

static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}
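/* CHOOSE_LOAD_FUNC(): for a constant offset K, pick the sk_load_* variant:
 * a non-negative K takes the *_positive_offset fast path, a negative K within
 * the SKF_LL_OFF range takes *_negative_offset, and anything else falls back
 * to the generic helper.
 */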

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

struct bpf_binary_header {
	unsigned int	pages;
	/* Note : for security reasons, bpf code will follow a randomly
	 * sized amount of int3 instructions
	 */
	u8		image[];
};

static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
						  u8 **image_ptr)
{
	unsigned int sz, hole;
	struct bpf_binary_header *header;

	/* Most BPF filters are really small,
	 * but if some of them fill a page, allow at least
	 * 128 extra bytes to insert a random section of int3
	 */
	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
	header = module_alloc(sz);
	if (!header)
		return NULL;

	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */

	header->pages = sz / PAGE_SIZE;
	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));

	/* insert a random number of int3 instructions before BPF code */
	*image_ptr = &header->image[prandom_u32() % hole];
	return header;
}

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static inline bool is_ereg(u32 reg)
{
	if (reg == BPF_REG_5 || reg == AUX_REG ||
	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
		return true;
	else
		return false;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static inline u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static inline u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
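/* Worked example: add_2reg(0xC0, BPF_REG_1, BPF_REG_2) is
 * 0xC0 + 7 + (6 << 3) = 0xF7 per reg2hex[] above, so
 * EMIT_mov(BPF_REG_1, BPF_REG_2) produces 48 89 f7, i.e. 'mov rdi,rsi'.
 */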

struct jit_context {
	unsigned int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	u8 temp[64];
	int i;
	int proglen = 0;
	u8 *prog = temp;
	int stacksize = MAX_BPF_STACK +
		32 /* space for rbx, r13, r14, r15 */ +
		8 /* space for skb_copy_bits() buffer */;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, stacksize */
	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */

	if (ctx->seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
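			/* x86 div uses rdx:rax implicitly, and those registers
			 * back BPF_REG_0 and BPF_REG_3, so preserve them around
			 * the operation and do the work via r11 (AUX_REG)
			 */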
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:
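			/* x86 variable shifts take the count in %cl, i.e. rcx,
			 * which is BPF_REG_4; the register shuffling below keeps
			 * both operands intact while satisfying that constraint
			 */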

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
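			/* ModRM mod=01 (0x40) carries a disp8, mod=10 (0x80) a
			 * disp32, hence the is_imm8(insn->off) choice below
			 */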

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (ctx->seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (ctx->seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:		ctx->seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (i != insn_cnt - 1) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
			 * to the interpreter but not to the JIT,
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
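/* Classic BPF filters are converted to internal BPF (bpf_convert_filter())
 * before they reach this JIT, so the classic entry point is left as an empty
 * stub; all code generation happens in bpf_int_jit_compile().
 */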

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
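	/* JIT the program repeatedly (at most 10 passes): the offsets in
	 * addrs[] shrink toward their final values as jump targets settle,
	 * and only once the image size stops changing is the final image
	 * allocated and filled in by one more pass.
	 */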

	for (pass = 0; pass < 10; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				module_free(NULL, header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_alloc_binary(proglen, &image);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = 1;
	}
out:
	kfree(addrs);
}

static void bpf_jit_free_deferred(struct work_struct *work)
{
	struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
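	/* the image starts within the first page of its page-aligned
	 * allocation (see bpf_alloc_binary()), so masking with PAGE_MASK
	 * recovers the header
	 */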
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	set_memory_rw(addr, header->pages);
	module_free(NULL, header);
	kfree(fp);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		INIT_WORK(&fp->work, bpf_jit_free_deferred);
		schedule_work(&fp->work);
	} else {
		kfree(fp);
	}
}