// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
30
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
31 32
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...       	] 		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
47
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
48
	else
49
		return -(BPF_PPC_STACK_SAVE + 24);
50 51
}

52 53
/* Offset (from r1) of tail_call_cnt: just above the 16-byte local_tmp_var. */
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	int local_off = bpf_jit_stack_local(ctx);

	return local_off + 16;
}

57 58 59
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
60 61 62
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));
63 64 65 66 67

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

68 69 70 71
/* Register reallocation is not used by the 64-bit JIT; intentionally empty. */
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

72
/*
 * Emit the program prologue: tail_call_cnt initialization (or NOPs of the
 * same size, so a tail call can branch past it), optional stack frame and
 * LR save, NVR saves for seen BPF registers 6-10, and frame pointer setup.
 */
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

/* Size in bytes of the two instructions above that a tail call skips. */
#define BPF_TAILCALL_PROLOGUE_SIZE	8

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		/* Allocate our frame with a store-with-update of r1. */
		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}

120
/*
 * Undo the prologue's register/stack state: restore seen NVRs, pop our
 * stack frame and reload LR if we saved it. Shared by the normal epilogue
 * and the tail call path (which must unwind before branching away).
 */
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
		}
	}
}

139
/*
 * Emit the program epilogue: common teardown, then move the BPF return
 * value into r3 (the ABI return register) and return to the caller.
 */
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));

	EMIT(PPC_RAW_BLR());
}

149 150 151 152 153 154 155 156
/*
 * Emit an indirect call to a kernel helper whose address is known and
 * fixed at JIT time. On ELF ABI v1, @func is a function descriptor and
 * we must load the real entry point and TOC from it.
 */
static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to CTR */
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	EMIT(PPC_RAW_MTCTR(12));
#endif
	EMIT(PPC_RAW_BCTRL());
}

174
/*
 * Emit a call to @func when the address may still change in a later
 * (extra) pass, e.g. bpf-to-bpf calls. The address load is padded to a
 * fixed five-instruction sequence so the emitted size never varies
 * between passes.
 */
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	EMIT(PPC_RAW_MTCTR(12));
	EMIT(PPC_RAW_BCTRL());
}

210
/*
 * Emit the BPF tail call sequence: bounds-check the index against the
 * array, enforce the tail call count limit, load the target program and
 * jump into it just past its prologue (so tail_call_cnt is preserved).
 * On any failed check, execution branches to @out (past this sequence).
 */
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	/* Zero-extend the 32-bit index before the unsigned compare. */
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297
/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

/*
 * Fallback store-forwarding barrier sequence, called indirectly from the
 * BPF_NOSPEC handling when STF_BARRIER_FALLBACK is in effect: spill two
 * GPRs to the redzone, sync, reload, then the ori/branch-chain pattern.
 */
asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);

298
/* Assemble the body code between the prologue & epilogue */
299 300
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, bool extra_pass)
301
{
302
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
303 304
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
305
	int i, ret;
306 307 308 309 310 311 312 313 314 315

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
316 317
		bool func_addr_fixed;
		u64 func_addr;
318 319
		u64 imm64;
		u32 true_cond;
320
		u32 tmp_idx;
321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
339
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
340
			bpf_set_seen_register(ctx, dst_reg);
341
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
342
			bpf_set_seen_register(ctx, src_reg);
343 344 345 346 347 348 349

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
350
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
351 352 353
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
354
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
355 356 357
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
358 359 360 361 362 363 364 365 366 367
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
368
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
369 370 371 372 373 374 375
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
376 377 378 379 380
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
381
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
382
			else
383
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
384 385 386 387
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
388
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
389 390 391
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
392 393
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
394
				else
395 396
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
397 398 399 400 401
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
402 403 404 405
				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
406
			} else
407
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
408 409 410 411
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
412 413 414 415
				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
416
			} else
417
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
418 419 420 421 422 423 424
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
425 426 427 428 429 430 431 432
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}
433 434 435 436 437

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
438 439 440 441
					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
442
							b2p[TMP_REG_1],
443 444 445
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
446
				} else
447 448
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
449 450 451
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
452 453 454 455
					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
456
							b2p[TMP_REG_1],
457 458 459
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
460
				} else
461 462
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
463 464 465 466 467
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
468
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
469 470 471 472 473 474 475
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
476
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
477 478 479 480
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
481
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
482 483 484
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
485
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
486 487 488 489
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
490
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
491 492 493 494 495 496
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
497
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
498 499
			} else {
				if (IMM_L(imm))
500
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
501
				if (IMM_H(imm))
502
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
503 504 505 506
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
507
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
508 509 510 511 512 513
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
514
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
515 516
			} else {
				if (IMM_L(imm))
517
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
518
				if (IMM_H(imm))
519
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
520 521 522 523
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
524
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
525 526 527
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
528 529
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
530
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
531 532 533
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<== (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
534
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
535 536
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
537 538 539
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<== imm */
			if (imm != 0)
540
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
541 542
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
543
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
544 545
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
546 547
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
548
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
549 550
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
551
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
552 553
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
554 555 556
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
557
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
558
			break;
559
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
560
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
561
			goto bpf_alu32_trunc;
562
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
563
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
564
			break;
565
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
566
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
567
			goto bpf_alu32_trunc;
568 569
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
570
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
571 572 573 574 575 576 577
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
578 579
			if (imm == 1) {
				/* special mov32 for zext */
580
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
581 582
				break;
			}
583
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
584 585 586 587 588 589
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
590 591
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
592 593 594 595
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
596
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
597
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
615
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
616
				/* Rotate 8 bits right & insert LSB to reg */
617
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
618
				/* Move result back to dst_reg */
619
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
620 621 622 623 624 625 626
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
627
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
628
				/* Rotate 24 bits and insert byte 1 */
629
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
630
				/* Rotate 24 bits and insert byte 3 */
631 632
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
633 634 635 636 637 638 639 640 641 642
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
643
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
644 645
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
646 647 648 649 650 651 652 653
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
654
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
655 656
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
657 658
				break;
			case 32:
659 660
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
661
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
662 663 664 665 666 667 668
				break;
			case 64:
				/* nop */
				break;
			}
			break;

669 670 671 672
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(12));
				EMIT(PPC_RAW_BCTRL());
				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
				break;
			case STF_BARRIER_NONE:
				break;
			}
696 697
			break;

698 699 700 701 702 703
		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
704
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
705 706
				src_reg = b2p[TMP_REG_1];
			}
707
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
708 709 710 711
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
712
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
713 714
				src_reg = b2p[TMP_REG_1];
			}
715
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
716 717 718 719 720 721 722
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
723
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
724 725 726 727 728 729 730
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
731
			PPC_BPF_STL(src_reg, dst_reg, off);
732 733 734
			break;

		/*
735
		 * BPF_STX ATOMIC (atomic ops)
736
		 */
737
		case BPF_STX | BPF_ATOMIC | BPF_W:
738
			if (imm != BPF_ADD) {
739 740 741 742 743 744 745 746
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

747
			/* Get EA into TMP_REG_1 */
748
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
749
			tmp_idx = ctx->idx * 4;
750
			/* load value from memory into TMP_REG_2 */
751
			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
752
			/* add value from src_reg into this */
753
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
754
			/* store result back */
755
			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
756
			/* we're done if this succeeded */
757
			PPC_BCC_SHORT(COND_NE, tmp_idx);
758
			break;
759
		case BPF_STX | BPF_ATOMIC | BPF_DW:
760
			if (imm != BPF_ADD) {
761 762 763 764 765 766 767
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}
			/* *(u64 *)(dst + off) += src */

768
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
769
			tmp_idx = ctx->idx * 4;
770 771 772
			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
773
			PPC_BCC_SHORT(COND_NE, tmp_idx);
774 775 776 777 778 779 780
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
781
			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
782 783
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
784 785 786
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
787
			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
788 789
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
790 791 792
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
793
			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
794 795
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
796 797 798
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
799
			PPC_BPF_LL(dst_reg, src_reg, off);
800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
829
		 * Call kernel helper or bpf function
830 831 832
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
833

834 835 836 837
			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;
838

839 840 841 842
			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
843
			/* move return value from r3 to BPF_REG_0 */
844
			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
845 846 847 848 849 850 851 852 853 854 855 856 857
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
858 859 860 861
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
862 863
			true_cond = COND_GT;
			goto cond_branch;
864 865 866 867
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
868 869 870 871
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
872 873
			true_cond = COND_LT;
			goto cond_branch;
874 875 876 877
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
878 879 880 881
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
882 883
			true_cond = COND_GE;
			goto cond_branch;
884 885 886 887
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
888 889 890 891
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
892 893
			true_cond = COND_LE;
			goto cond_branch;
894 895
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
896 897
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
898 899 900 901
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
902 903
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
904 905 906 907
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
908 909
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
910 911 912 913 914 915
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    src_reg));
				} else {
					int tmp_reg = b2p[TMP_REG_1];

					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
						       31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg,
							  b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg,
							  b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg,
							 b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPD(dst_reg,
							 b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
				else {
					int tmp_reg = b2p[TMP_REG_1];

					PPC_LI32(tmp_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
							    tmp_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
							tmp_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
							       0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
1054
		 * Tail call
1055
		 */
1056
		case BPF_JMP | BPF_TAIL_CALL:
1057
			ctx->seen |= SEEN_TAILCALL;
1058 1059 1060
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
1061
			break;
1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}