// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
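
/*
 * Example: EMIT3(0x48, 0x89, 0xE5) appends the byte sequence 48 89 e5
 * ('mov rbp, rsp') at prog and advances both prog and cnt by three.
 */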

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}
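
/*
 * Note: BPF_DW maps to 4 here because this helper only sizes the
 * immediate emitted for BPF_ST | BPF_MEM stores below, and
 * 'mov qword ptr [reg + off], imm32' takes a 32-bit immediate that
 * the CPU sign-extends to 64 bit.
 */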

/*
 * List of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}
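
/*
 * e.g. add_2mod(0x48, BPF_REG_7, BPF_REG_0) returns 0x49 (REX.W|REX.B):
 * BPF_REG_7 lives in x86-64 R13, which needs the REX.B extension bit.
 */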

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
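
/*
 * Example: add_2reg(0xC0, BPF_REG_1, BPF_REG_2) = 0xC0 + 7 + (6 << 3)
 * = 0xF7, so EMIT2(0x89, 0xF7) encodes 'mov edi, esi'.
 */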

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5

#define PROLOGUE_SIZE		25
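/*
 * 25 bytes = 5 (nop5) + 1 (push rbp) + 3 (mov rbp, rsp) +
 * 7 (sub rsp, imm32) + 1 (push rbx) + 3 * 2 (push r13/r14/r15) +
 * 2 (push 0 for tail_call_cnt); see emit_prologue() below.
 */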

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = X86_PATCH_SIZE;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
	prog += cnt;
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	EMIT1(0x53);             /* push rbx */
	EMIT2(0x41, 0x55);       /* push r13 */
	EMIT2(0x41, 0x56);       /* push r14 */
	EMIT2(0x41, 0x57);       /* push r15 */
	if (!ebpf_from_cbpf) {
		/* zero init tail_call_cnt */
		EMIT2(0x6a, 0x00);
		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
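
/*
 * Both emit a 5 byte insn: one opcode byte (0xE8 call, 0xE9 jmp) plus a
 * rel32 displacement measured from the end of the instruction, which is
 * why emit_patch() subtracts X86_PATCH_SIZE when computing the offset.
 */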

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
	}
	ret = 0;
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);		  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/*
	 * Now we're ready to jump into the next BPF program:
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, 14);                            /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */

	poke->ip = image + (addr - X86_PATCH_SIZE);
	poke->adj_off = PROLOGUE_SIZE;

	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->ip_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->ip_stable), any parallel
			 * bpf_arch_text_poke() might occur still on the
			 * read-write image until we finally locked it as
			 * read-only. Both modifications on the given image
			 * are under text_mutex to avoid interference.
			 */
			ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->ip_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	/*
	 * If insn->off == 0 we can save one extra byte, but
	 * special case of x86 R13 which always needs an offset
	 * is not worth the hassle
	 */
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
		    /* We have to add extra byte for x86 SIL, DIL regs */
		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
	*pprog = prog;
}
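
/*
 * The extable fixup word below packs two fields (filled in by do_jit()):
 * bits 7..0 hold the length of the faulting load, used to advance
 * regs->ip past it, and the upper bits hold the pt_regs offset of the
 * destination register, which gets zeroed.
 */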

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0, excnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
	addrs[0] = prog - temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

847 848 849 850 851 852 853
			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
854
			break;
855
		}
856
			/* Shifts */
857 858 859 860 861 862 863
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
				return -EINVAL;
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, addrs[i], image);
			else
				emit_bpf_tail_call_indirect(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				if (BPF_CLASS(insn->code) == BPF_JMP)
					EMIT1(add_2mod(0x48, dst_reg, dst_reg));
				else if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (!bpf_prog_was_classic(bpf_prog))
				EMIT1(0x5B); /* get rid of tail_call_cnt */
			EMIT2(0x41, 0x5F);   /* pop r15 */
			EMIT2(0x41, 0x5E);   /* pop r14 */
			EMIT2(0x41, 0x5D);   /* pop r13 */
			EMIT1(0x5B);         /* pop rbx */
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}

static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
		      struct bpf_prog **progs, int prog_cnt, int stack_size)
{
	u8 *prog = *pprog;
	int cnt = 0, i;

	for (i = 0; i < prog_cnt; i++) {
		if (emit_call(&prog, __bpf_prog_enter, prog))
			return -EINVAL;
		/* remember prog start time returned by __bpf_prog_enter */
		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

		/* arg1: lea rdi, [rbp - stack_size] */
		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
		/* arg2: progs[i]->insnsi for interpreter */
		if (!progs[i]->jited)
			emit_mov_imm64(&prog, BPF_REG_2,
				       (long) progs[i]->insnsi >> 32,
				       (u32) (long) progs[i]->insnsi);
		/* call JITed bpf program or interpreter */
		if (emit_call(&prog, progs[i]->bpf_func, prog))
			return -EINVAL;

		/* arg1: mov rdi, progs[i] */
		emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
			       (u32) (long) progs[i]);
		/* arg2: mov rsi, rbx <- start time in nsec */
		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
		if (emit_call(&prog, __bpf_prog_exit, prog))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
				struct bpf_prog **fentry_progs, int fentry_cnt,
				struct bpf_prog **fexit_progs, int fexit_cnt,
				void *orig_call)
{
	int cnt = 0, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	u8 *prog;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		stack_size += 8; /* room for return value of orig_call */

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	save_regs(m, &prog, nr_args, stack_size);

	if (fentry_cnt)
		if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		if (fentry_cnt)
			restore_regs(m, &prog, nr_args, stack_size);

		/* call original function */
		if (emit_call(&prog, orig_call, prog))
			return -EINVAL;
		/* remember return value in a stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	}

	if (fexit_cnt)
		if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		/* restore original return value back into RAX */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* One half of the page has active running trampoline.
	 * Another half is an area for next trampoline.
	 * Make sure the trampoline generation logic doesn't overflow.
	 */
	if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY))
		return -EFAULT;
	return 0;
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 20 || image; pass++) {
1612 1613
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
1614
out_image:
1615 1616
			image = NULL;
			if (header)
1617
				bpf_jit_binary_free(header);
1618 1619
			prog = orig_prog;
			goto out_addrs;
1620
		}
1621
		if (image) {
1622
			if (proglen != oldproglen) {
1623 1624
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
1625
				goto out_image;
1626
			}
1627 1628 1629
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}