// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
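
/*
 * Worked example (illustrative): EMIT3(0x48, 0x89, 0xE5) packs its
 * arguments into the u32 0x00E58948, which emit_code() stores
 * little-endian, so the image receives 48 89 e5 -- 'mov rbp, rsp' --
 * and cnt advances by 3.
 */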

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}
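
/*
 * Note (illustrative): BPF_DW reports 4 because the caller below sizes
 * the immediate of a BPF_ST store with this, and x86
 * 'mov qword ptr [...], imm' only takes a sign-extended imm32,
 * never a full imm64.
 */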

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
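
/*
 * Example (illustrative): X86_JE is 0x74 ('je' with a rel8 target);
 * emitting 0x0F followed by 0x74 + 0x10 = 0x84 yields 'je' with a
 * rel32 target, which is how the cond_jmp code below widens short
 * jumps that do not fit in s8.
 */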

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}
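
/*
 * Example (illustrative): BPF_REG_7 lives in R13, so is_ereg() returns
 * true and add_1mod()/add_2mod() below set the REX.B/REX.R bits that
 * R8..R15 require.
 */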

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
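
/*
 * Worked example (illustrative): EMIT_mov(BPF_REG_1, BPF_REG_2) emits
 * add_2mod(0x48, ...) = 0x48, then 0x89, then add_2reg(0xC0, ...) =
 * 0xC0 + 7 + (6 << 3) = 0xF7, i.e. the bytes 48 89 f7 -- 'mov rdi, rsi'.
 */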

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5

#define PROLOGUE_SIZE		25
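
/*
 * Size check (illustrative): 5 (nops) + 1 (push rbp) + 3 (mov rbp, rsp) +
 * 7 (sub rsp, imm32) + 1 + 2 + 2 + 2 (four pushes) + 2 (push 0) = 25.
 */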

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = X86_PATCH_SIZE;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
	prog += cnt;
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	EMIT1(0x53);             /* push rbx */
	EMIT2(0x41, 0x55);       /* push r13 */
	EMIT2(0x41, 0x56);       /* push r14 */
	EMIT2(0x41, 0x57);       /* push r15 */
	if (!ebpf_from_cbpf) {
		/* zero init tail_call_cnt */
		EMIT2(0x6a, 0x00);
		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
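
/*
 * Example (illustrative): 0xE8/0xE9 are the 5-byte call/jmp rel32
 * forms, so the displacement above is computed against
 * ip + X86_PATCH_SIZE, the address of the instruction following the
 * patched one.
 */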

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
	}
	ret = 0;
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);		  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/*
	 * Wow we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, 14);                            /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

	poke->ip = image + (addr - X86_PATCH_SIZE);
	poke->adj_off = PROLOGUE_SIZE;

	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->ip_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->ip_stable), any parallel
			 * bpf_arch_text_poke() might occur still on the
			 * read-write image until we finally locked it as
			 * read-only. Both modifications on the given image
			 * are under text_mutex to avoid interference.
			 */
			ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->ip_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
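
/*
 * Example (illustrative): for imm64 = 0x00000000deadbeef with
 * dst_reg == BPF_REG_0, the value fits in u32, so the 5-byte
 * 'mov eax, 0xdeadbeef' (implicitly zero-extending into rax) is
 * emitted instead of a 10-byte movabs.
 */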

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	/*
	 * If insn->off == 0 we can save one extra byte, but
	 * special case of x86 R13 which always needs an offset
	 * is not worth the hassle
	 */
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
		    /* We have to add extra byte for x86 SIL, DIL regs */
		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
	*pprog = prog;
}

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
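
/*
 * Example (illustrative): for a faulting 4-byte 'mov rbx, qword ptr
 * [rax + 0x14]', do_jit() stores fixup = 4 | (offsetof(struct pt_regs,
 * bx) << 8), so this handler zeroes regs->bx and advances regs->ip by 4.
 */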

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0, excnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
	addrs[0] = prog - temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
				return -EINVAL;
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, addrs[i], image);
			else
				emit_bpf_tail_call_indirect(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				if (BPF_CLASS(insn->code) == BPF_JMP)
					EMIT1(add_2mod(0x48, dst_reg, dst_reg));
				else if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (!bpf_prog_was_classic(bpf_prog))
				EMIT1(0x5B); /* get rid of tail_call_cnt */
			EMIT2(0x41, 0x5F);   /* pop r15 */
			EMIT2(0x41, 0x5E);   /* pop r14 */
			EMIT2(0x41, 0x5D);   /* pop r13 */
			EMIT1(0x5B);         /* pop rbx */
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
			break;

		default:
			/*
			 * By design the x86-64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_progs *tp, int stack_size)
{
	u8 *prog = *pprog;
	int cnt = 0, i;

	for (i = 0; i < tp->nr_progs; i++) {
		if (emit_call(&prog, __bpf_prog_enter, prog))
			return -EINVAL;
		/* remember prog start time returned by __bpf_prog_enter */
		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

		/* arg1: lea rdi, [rbp - stack_size] */
		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
		/* arg2: progs[i]->insnsi for interpreter */
		if (!tp->progs[i]->jited)
			emit_mov_imm64(&prog, BPF_REG_2,
				       (long) tp->progs[i]->insnsi >> 32,
				       (u32) (long) tp->progs[i]->insnsi);
		/* call JITed bpf program or interpreter */
		if (emit_call(&prog, tp->progs[i]->bpf_func, prog))
			return -EINVAL;

		/* arg1: mov rdi, progs[i] */
		emit_mov_imm64(&prog, BPF_REG_1, (long) tp->progs[i] >> 32,
			       (u32) (long) tp->progs[i]);
		/* arg2: mov rsi, rbx <- start time in nsec */
		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
		if (emit_call(&prog, __bpf_prog_exit, prog))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int cnt = 0, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	u8 *prog;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		stack_size += 8; /* room for return value of orig_call */

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	save_regs(m, &prog, nr_args, stack_size);

K
		if (invoke_bpf(m, &prog, fentry, stack_size))
A

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
K
A

		/* call original function */
		if (emit_call(&prog, orig_call, prog))
			return -EINVAL;
		/* remember return value in a stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, stack_size))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		/* restore original return value back into RAX */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY))
		return -EFAULT;
	return prog - (u8 *)image;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static void emit_nops(u8 **pprog, unsigned int len)
{
	unsigned int i, noplen;
	u8 *prog = *pprog;
	int cnt = 0;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(ideal_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

static int emit_fallback_jump(u8 **pprog)
{
	u8 *prog = *pprog;
	int err = 0;

#ifdef CONFIG_RETPOLINE
	/* Note that this assumes that the compiler uses external
	 * thunks for indirect calls. Both clang and GCC use the same
	 * naming convention for external thunks.
	 */
	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
	int cnt = 0;

	EMIT2(0xFF, 0xE2);	/* jmp rdx */
#endif
	*pprog = prog;
	return err;
}

static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *jg_target, *prog = *pprog;
	int pivot, err, jg_bytes = 1, cnt = 0;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		err = emit_fallback_jump(&prog);	/* jmp thunk/indirect */
		if (err)
			return err;

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	jg_target = PTR_ALIGN(prog, 16);
	if (jg_target != prog)
		emit_nops(&prog, jg_target - prog);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}
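
/*
 * Illustrative shape of the generated dispatcher for three sorted
 * targets A < B < C: 'cmp rdx, B; jg upper' guards a recursive split,
 * each leaf emits 'cmp rdx, X; je X', and a retpoline-safe jump
 * through rdx serves as the fallback, giving O(log n) compares per
 * dispatch instead of a plain indirect call.
 */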

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}