// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
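		/*
		 * len is 3 or 4 here: a single u32 store covers both cases.
		 * For len == 3 the fourth byte lands beyond the emitted
		 * length, which is harmless because the emit buffers keep
		 * BPF_INSN_SAFETY bytes of slack.
		 */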
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
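
/*
 * Example: EMIT3(0x48, 0x89, 0xE5) packs its arguments into the u32
 * 0x00E58948, which emit_code() stores little-endian as 48 89 E5 --
 * the encoding of 'mov rbp, rsp'.
 */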

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
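
/*
 * Example: X86_JE is 0x74 ('je rel8'); the far form emitted by the
 * conditional-jump code in do_jit() is 0x0F 0x84 ('je rel32'), i.e.
 * the short opcode plus 0x10 behind an 0x0F prefix.
 */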

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};
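
/*
 * Used by the BPF_PROBE_MEM handling in do_jit(): maps the BPF
 * destination register of a faulting load to the pt_regs field that
 * ex_handler_bpf() must zero out.
 */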

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
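
/*
 * Worked example: EMIT_mov(BPF_REG_1, BPF_REG_0) emits
 * add_2mod(0x48, BPF_REG_1, BPF_REG_0) = 0x48 (no REX.B/REX.R needed),
 * opcode 0x89 and add_2reg(0xC0, BPF_REG_1, BPF_REG_0) =
 * 0xC0 + 7 + (0 << 3) = 0xC7, i.e. '48 89 C7' -- mov rdi, rax.
 */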

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5

#define PROLOGUE_SIZE		25
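/*
 * 5 (nop5) + 1 (push rbp) + 3 (mov rbp, rsp) + 7 (sub rsp, imm32) +
 * 1 (push rbx) + 3 * 2 (push r13/r14/r15) + 2 (push 0) = 25 bytes.
 */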

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = X86_PATCH_SIZE;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
	prog += cnt;
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	EMIT1(0x53);             /* push rbx */
	EMIT2(0x41, 0x55);       /* push r13 */
	EMIT2(0x41, 0x56);       /* push r14 */
	EMIT2(0x41, 0x57);       /* push r15 */
	if (!ebpf_from_cbpf) {
		/* zero init tail_call_cnt */
		EMIT2(0x6a, 0x00);
		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}
	*pprog = prog;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
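	/*
	 * OFFSET1/2/3 are hand-counted byte distances from the end of
	 * each conditional jump to the out: label; the BUILD_BUG_ON()s
	 * at the bottom of this function keep them honest whenever the
	 * emitted sequence changes.
	 */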
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
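	/*
	 * tail_call_cnt is the dword at rbp - 548: the verifier forces
	 * stack_depth to MAX_BPF_STACK (512) for programs using tail
	 * calls, so the slot zeroed by emit_prologue()'s 'push 0' sits
	 * at a fixed offset below the BPF stack and the four saved
	 * callee registers.
	 */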
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);		  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/*
	 * Wow we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
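
/*
 * Example: 0x00000000ffffffff passes is_uimm32() and becomes a 5-byte
 * 'mov eax, 0xffffffff' (the upper 32 bits are implicitly zeroed),
 * while 0x0000000100000000 needs the 10-byte movabs form.
 */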

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	/*
	 * If insn->off == 0 we can save one extra byte, but
	 * special case of x86 R13 which always needs an offset
	 * is not worth the hassle
	 */
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
		    /* We have to add extra byte for x86 SIL, DIL regs */
		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}
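
/*
 * The rel32 is relative to the first byte after the 5-byte call/jmp,
 * hence 'ip + X86_PATCH_SIZE': e.g. a call emitted at 0x1000 targeting
 * 0x2000 is encoded as E8 with rel32 = 0x2000 - 0x1005 = 0xffb.
 */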

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	int (*emit_patch_fn)(u8 **pprog, void *func, void *ip);
	u8 old_insn[X86_PATCH_SIZE] = {};
	u8 new_insn[X86_PATCH_SIZE] = {};
	u8 *prog;
	int ret;

	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	switch (t) {
	case BPF_MOD_NOP_TO_CALL ... BPF_MOD_CALL_TO_NOP:
		emit_patch_fn = emit_call;
		break;
	case BPF_MOD_NOP_TO_JUMP ... BPF_MOD_JUMP_TO_NOP:
		emit_patch_fn = emit_jump;
		break;
	default:
		return -ENOTSUPP;
	}

	if (old_addr) {
		prog = old_insn;
		ret = emit_patch_fn(&prog, old_addr, (void *)ip);
		if (ret)
			return ret;
	}
	if (new_addr) {
		prog = new_insn;
		ret = emit_patch_fn(&prog, new_addr, (void *)ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	switch (t) {
	case BPF_MOD_NOP_TO_CALL:
	case BPF_MOD_NOP_TO_JUMP:
		if (memcmp(ip, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE))
			goto out;
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		break;
	case BPF_MOD_CALL_TO_CALL:
	case BPF_MOD_JUMP_TO_JUMP:
		if (memcmp(ip, old_insn, X86_PATCH_SIZE))
			goto out;
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		break;
	case BPF_MOD_CALL_TO_NOP:
	case BPF_MOD_JUMP_TO_NOP:
		if (memcmp(ip, old_insn, X86_PATCH_SIZE))
			goto out;
		text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE,
			     NULL);
		break;
	}
	ret = 0;
out:
	mutex_unlock(&text_mutex);
	return ret;
}

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
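
/*
 * x->fixup is packed by do_jit() below: the low byte holds the length
 * of the faulting load (so the handler can skip it), the upper bits
 * hold the pt_regs offset of the destination register to zero.
 */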

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0, excnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
	addrs[0] = prog - temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
				return -EINVAL;
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				if (BPF_CLASS(insn->code) == BPF_JMP)
					EMIT1(add_2mod(0x48, dst_reg, dst_reg));
				else if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (!bpf_prog_was_classic(bpf_prog))
				EMIT1(0x5B); /* get rid of tail_call_cnt */
			EMIT2(0x41, 0x5F);   /* pop r15 */
			EMIT2(0x41, 0x5E);   /* pop r14 */
			EMIT2(0x41, 0x5D);   /* pop r13 */
			EMIT1(0x5B);         /* pop rbx */
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}

static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
		      struct bpf_prog **progs, int prog_cnt, int stack_size)
{
	u8 *prog = *pprog;
	int cnt = 0, i;

	for (i = 0; i < prog_cnt; i++) {
		if (emit_call(&prog, __bpf_prog_enter, prog))
			return -EINVAL;
		/* remember prog start time returned by __bpf_prog_enter */
		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

		/* arg1: lea rdi, [rbp - stack_size] */
		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
		/* arg2: progs[i]->insnsi for interpreter */
		if (!progs[i]->jited)
			emit_mov_imm64(&prog, BPF_REG_2,
				       (long) progs[i]->insnsi >> 32,
				       (u32) (long) progs[i]->insnsi);
		/* call JITed bpf program or interpreter */
		if (emit_call(&prog, progs[i]->bpf_func, prog))
			return -EINVAL;

		/* arg1: mov rdi, progs[i] */
		emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
			       (u32) (long) progs[i]);
		/* arg2: mov rsi, rbx <- start time in nsec */
		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
		if (emit_call(&prog, __bpf_prog_exit, prog))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
				struct bpf_prog **fentry_progs, int fentry_cnt,
				struct bpf_prog **fexit_progs, int fexit_cnt,
				void *orig_call)
{
	int cnt = 0, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	u8 *prog;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		stack_size += 8; /* room for return value of orig_call */

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	save_regs(m, &prog, nr_args, stack_size);

	if (fentry_cnt)
		if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		if (fentry_cnt)
			restore_regs(m, &prog, nr_args, stack_size);

		/* call original function */
		if (emit_call(&prog, orig_call, prog))
			return -EINVAL;
		/* remember return value on the stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	}

	if (fexit_cnt)
		if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		/* restore original return value back into RAX */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* One half of the page has active running trampoline.
	 * Another half is an area for next trampoline.
	 * Make sure the trampoline generation logic doesn't overflow.
	 */
	if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY))
		return -EFAULT;
	return 0;
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]
	 * each BPF instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
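	/*
	 * Over-estimating addrs[] here is what lets the passes below
	 * converge: distances between instructions can only shrink from
	 * pass to pass, so a jump encoded as imm8 never has to grow back
	 * into the imm32 form.
	 */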
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}