Commit 5e581dad authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf: make unknown opcode handling more robust

Recent findings by syzkaller, fixed in 7891a87e ("bpf: arsh is
not supported in 32 bit alu thus reject it"), triggered a warning
in the interpreter because an unknown opcode was not rejected by
the verifier. The 'return 0' for an unknown opcode is really not
optimal, since with BPF-to-BPF calls this would go untracked by
the verifier.

Do two things here to improve the situation: i) perform a basic
insn sanity check early in the verification phase and reject every
non-uapi insn right there. The bpf_opcode_in_insntable() helper
reuses the same mapping as the jumptable in ___bpf_prog_run(), sans
the non-public mappings. And ii) have ___bpf_prog_run() BUG in the
case where the verifier would ever create an unknown opcode through
one of its rewrites.
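
As a minimal illustration (not part of the patch; toy TOY_* names,
two entries only, the real BPF_INSN_MAP below lists every UAPI
opcode), the single-list/double-expansion works like this:

    #include <linux/bpf.h>	/* BPF_ALU, BPF_JMP, BPF_ADD, ... */

    /* One X-macro opcode list, expanded two different ways. */
    #define TOY_INSN_MAP(INSN_2, INSN_3)	\
    	INSN_3(ALU, ADD, X),		\
    	INSN_2(JMP, EXIT)

    /* Expansion 1 (verifier side): each opcode becomes a 'true' slot. */
    #define TOY_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
    #define TOY_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
    static const bool toy_insntable[256] = {
    	TOY_INSN_MAP(TOY_INSN_2_TBL, TOY_INSN_3_TBL),
    };
    /* -> [BPF_ALU | BPF_ADD | BPF_X] = true, [BPF_JMP | BPF_EXIT] = true */

    /* Expansion 2 (interpreter side): the same entries become
     * computed-goto labels, e.g. INSN_3(ALU, ADD, X) -> &&ALU_ADD_X.
     */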

Note that JITs do not have such issues since they would punt to the
interpreter in these situations. Moreover, BPF_JIT_ALWAYS_ON also
helps to avoid such unknown opcodes in the first place.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Parent 2a5418a1
include/linux/filter.h
@@ -688,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
+bool bpf_opcode_in_insntable(u8 code);
+
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags);
kernel/bpf/core.c
@@ -782,6 +782,137 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 EXPORT_SYMBOL_GPL(__bpf_call_base);
 
+/* All UAPI available opcodes. */
+#define BPF_INSN_MAP(INSN_2, INSN_3) \
+	/* 32 bit ALU operations. */ \
+	/* Register based. */ \
+	INSN_3(ALU, ADD, X), \
+	INSN_3(ALU, SUB, X), \
+	INSN_3(ALU, AND, X), \
+	INSN_3(ALU, OR, X), \
+	INSN_3(ALU, LSH, X), \
+	INSN_3(ALU, RSH, X), \
+	INSN_3(ALU, XOR, X), \
+	INSN_3(ALU, MUL, X), \
+	INSN_3(ALU, MOV, X), \
+	INSN_3(ALU, DIV, X), \
+	INSN_3(ALU, MOD, X), \
+	INSN_2(ALU, NEG), \
+	INSN_3(ALU, END, TO_BE), \
+	INSN_3(ALU, END, TO_LE), \
+	/* Immediate based. */ \
+	INSN_3(ALU, ADD, K), \
+	INSN_3(ALU, SUB, K), \
+	INSN_3(ALU, AND, K), \
+	INSN_3(ALU, OR, K), \
+	INSN_3(ALU, LSH, K), \
+	INSN_3(ALU, RSH, K), \
+	INSN_3(ALU, XOR, K), \
+	INSN_3(ALU, MUL, K), \
+	INSN_3(ALU, MOV, K), \
+	INSN_3(ALU, DIV, K), \
+	INSN_3(ALU, MOD, K), \
+	/* 64 bit ALU operations. */ \
+	/* Register based. */ \
+	INSN_3(ALU64, ADD, X), \
+	INSN_3(ALU64, SUB, X), \
+	INSN_3(ALU64, AND, X), \
+	INSN_3(ALU64, OR, X), \
+	INSN_3(ALU64, LSH, X), \
+	INSN_3(ALU64, RSH, X), \
+	INSN_3(ALU64, XOR, X), \
+	INSN_3(ALU64, MUL, X), \
+	INSN_3(ALU64, MOV, X), \
+	INSN_3(ALU64, ARSH, X), \
+	INSN_3(ALU64, DIV, X), \
+	INSN_3(ALU64, MOD, X), \
+	INSN_2(ALU64, NEG), \
+	/* Immediate based. */ \
+	INSN_3(ALU64, ADD, K), \
+	INSN_3(ALU64, SUB, K), \
+	INSN_3(ALU64, AND, K), \
+	INSN_3(ALU64, OR, K), \
+	INSN_3(ALU64, LSH, K), \
+	INSN_3(ALU64, RSH, K), \
+	INSN_3(ALU64, XOR, K), \
+	INSN_3(ALU64, MUL, K), \
+	INSN_3(ALU64, MOV, K), \
+	INSN_3(ALU64, ARSH, K), \
+	INSN_3(ALU64, DIV, K), \
+	INSN_3(ALU64, MOD, K), \
+	/* Call instruction. */ \
+	INSN_2(JMP, CALL), \
+	/* Exit instruction. */ \
+	INSN_2(JMP, EXIT), \
+	/* Jump instructions. */ \
+	/* Register based. */ \
+	INSN_3(JMP, JEQ, X), \
+	INSN_3(JMP, JNE, X), \
+	INSN_3(JMP, JGT, X), \
+	INSN_3(JMP, JLT, X), \
+	INSN_3(JMP, JGE, X), \
+	INSN_3(JMP, JLE, X), \
+	INSN_3(JMP, JSGT, X), \
+	INSN_3(JMP, JSLT, X), \
+	INSN_3(JMP, JSGE, X), \
+	INSN_3(JMP, JSLE, X), \
+	INSN_3(JMP, JSET, X), \
+	/* Immediate based. */ \
+	INSN_3(JMP, JEQ, K), \
+	INSN_3(JMP, JNE, K), \
+	INSN_3(JMP, JGT, K), \
+	INSN_3(JMP, JLT, K), \
+	INSN_3(JMP, JGE, K), \
+	INSN_3(JMP, JLE, K), \
+	INSN_3(JMP, JSGT, K), \
+	INSN_3(JMP, JSLT, K), \
+	INSN_3(JMP, JSGE, K), \
+	INSN_3(JMP, JSLE, K), \
+	INSN_3(JMP, JSET, K), \
+	INSN_2(JMP, JA), \
+	/* Store instructions. */ \
+	/* Register based. */ \
+	INSN_3(STX, MEM, B), \
+	INSN_3(STX, MEM, H), \
+	INSN_3(STX, MEM, W), \
+	INSN_3(STX, MEM, DW), \
+	INSN_3(STX, XADD, W), \
+	INSN_3(STX, XADD, DW), \
+	/* Immediate based. */ \
+	INSN_3(ST, MEM, B), \
+	INSN_3(ST, MEM, H), \
+	INSN_3(ST, MEM, W), \
+	INSN_3(ST, MEM, DW), \
+	/* Load instructions. */ \
+	/* Register based. */ \
+	INSN_3(LDX, MEM, B), \
+	INSN_3(LDX, MEM, H), \
+	INSN_3(LDX, MEM, W), \
+	INSN_3(LDX, MEM, DW), \
+	/* Immediate based. */ \
+	INSN_3(LD, IMM, DW), \
+	/* Misc (old cBPF carry-over). */ \
+	INSN_3(LD, ABS, B), \
+	INSN_3(LD, ABS, H), \
+	INSN_3(LD, ABS, W), \
+	INSN_3(LD, IND, B), \
+	INSN_3(LD, IND, H), \
+	INSN_3(LD, IND, W)
+
+bool bpf_opcode_in_insntable(u8 code)
+{
+#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
+#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
+	static const bool public_insntable[256] = {
+		[0 ... 255] = false,
+		/* Now overwrite non-defaults ... */
+		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
+	};
+#undef BPF_INSN_3_TBL
+#undef BPF_INSN_2_TBL
+	return public_insntable[code];
+}
+
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 /**
  * __bpf_prog_run - run eBPF program on a given context
@@ -793,115 +924,18 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 	u64 tmp;
+#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
+#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
 	static const void *jumptable[256] = {
 		[0 ... 255] = &&default_label,
 		/* Now overwrite non-defaults ... */
-		/* 32 bit ALU operations */
-		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
-		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
-		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
-		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
-		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
-		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
-		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
-		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
-		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
-		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
-		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
-		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
-		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
-		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
-		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
-		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
-		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
-		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
-		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
-		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
-		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
-		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
-		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
-		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
-		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
-		/* 64 bit ALU operations */
-		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
-		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
-		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
-		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
-		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
-		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
-		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
-		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
-		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
-		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
-		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
-		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
-		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
-		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
-		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
-		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
-		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
-		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
-		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
-		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
-		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
-		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
-		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
-		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
-		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
-		/* Call instruction */
-		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
+		/* Non-UAPI available opcodes. */
 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
-		/* Jumps */
-		[BPF_JMP | BPF_JA] = &&JMP_JA,
-		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
-		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
-		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
-		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
-		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
-		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
-		[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
-		[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
-		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
-		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
-		[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
-		[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
-		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
-		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
-		[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
-		[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
-		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
-		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
-		[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
-		[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
-		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
-		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
-		/* Program return */
-		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
-		/* Store instructions */
-		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
-		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
-		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
-		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
-		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
-		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
-		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
-		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
-		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
-		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
-		/* Load instructions */
-		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
-		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
-		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
-		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
-		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
-		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
-		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
-		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
-		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
-		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
-		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
 	};
+#undef BPF_INSN_3_LBL
+#undef BPF_INSN_2_LBL
 	u32 tail_call_cnt = 0;
 	void *ptr;
 	int off;
@@ -1302,8 +1336,14 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 		goto load_byte;
 
 	default_label:
-		/* If we ever reach this, we have a bug somewhere. */
-		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+		/* If we ever reach this, we have a bug somewhere. Die hard here
+		 * instead of just returning 0; we could be somewhere in a subprog,
+		 * so execution could continue otherwise which we do /not/ want.
+		 *
+		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
+		 */
+		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+		BUG_ON(1);
 		return 0;
 }
 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
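As a standalone illustration of the interpreter side (not kernel code:
a userspace sketch using the same GCC computed-goto and range-initializer
extensions as ___bpf_prog_run() itself, with made-up opcode handling),
a dispatch loop whose default slot dies hard instead of silently
returning looks like this:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static long run(const uint8_t *code)
    {
    	/* Every slot defaults to the bug handler; known opcodes
    	 * overwrite their slot with a real label.
    	 */
    	static const void *jumptable[256] = {
    		[0 ... 255] = &&default_label,
    		[0x95] = &&exit_insn,	/* BPF_JMP | BPF_EXIT */
    	};

    	goto *jumptable[*code];

    exit_insn:
    	return 0;

    default_label:
    	/* The in-kernel equivalent is pr_warn() + BUG_ON(1):
    	 * execution must not silently continue, e.g. into a subprog.
    	 */
    	fprintf(stderr, "unknown opcode %02x\n", *code);
    	abort();
    }

    int main(void)
    {
    	uint8_t ok = 0x95, bad = 0xe0;

    	run(&ok);		/* dispatches to exit_insn */
    	run(&bad);		/* dispatches to default_label, aborts */
    	return 0;
    }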
kernel/bpf/verifier.c
@@ -4981,6 +4981,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 next_insn:
 			insn++;
 			i++;
+			continue;
+		}
+
+		/* Basic sanity check before we invest more work here. */
+		if (!bpf_opcode_in_insntable(insn->code)) {
+			verbose(env, "unknown opcode %02x\n", insn->code);
+			return -EINVAL;
 		}
 	}
 
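And the verifier side as a self-contained userspace sketch (opcode
values are from the BPF uapi; the two whitelisted entries are examples
only, whereas the kernel table covers every public opcode):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BPF_ALU64	0x07
    #define BPF_ADD	0x00
    #define BPF_X	0x08
    #define BPF_JMP	0x05
    #define BPF_EXIT	0x90

    /* Toy whitelist; unset slots stay false. */
    static const bool public_insntable[256] = {
    	[BPF_ALU64 | BPF_ADD | BPF_X] = true,
    	[BPF_JMP | BPF_EXIT]          = true,
    };

    struct insn { uint8_t code; };

    /* Reject unknown opcodes before investing more verification work,
     * mirroring the replace_map_fd_with_map_ptr() hunk above.
     */
    static int sanity_check(const struct insn *insns, int cnt)
    {
    	for (int i = 0; i < cnt; i++) {
    		if (!public_insntable[insns[i].code]) {
    			fprintf(stderr, "unknown opcode %02x\n",
    				insns[i].code);
    			return -1;	/* the kernel returns -EINVAL */
    		}
    	}
    	return 0;
    }

    int main(void)
    {
    	struct insn prog[] = {
    		{ BPF_ALU64 | BPF_ADD | BPF_X },
    		{ 0xe0 },	/* no such public opcode */
    	};

    	return sanity_check(prog, 2) ? 1 : 0;	/* rejects the program */
    }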