Commit 46144839 authored by Jiong Wang, committed by Alexei Starovoitov

nfp: bpf: implement jitting of JMP32

This patch implements code-gen for new JMP32 instructions on NFP.
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Parent 626a5f66
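For context, the new BPF_JMP32 instruction class behaves like BPF_JMP except that conditional jumps compare only the low 32 bits of their operands. A minimal C sketch of that semantic difference (illustrative only; these helper names are not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helpers showing the branch condition for BPF_JEQ under
 * the two jump classes; the JIT below has to preserve this difference.
 */
static bool jeq_taken_jmp64(uint64_t dst, uint64_t src)
{
        return dst == src;                     /* BPF_JMP: full 64-bit compare */
}

static bool jeq_taken_jmp32(uint64_t dst, uint64_t src)
{
        return (uint32_t)dst == (uint32_t)src; /* BPF_JMP32: low 32 bits only */
}

Since NFP maps each 64-bit eBPF register onto a pair of 32-bit registers, most JMP32 paths in the diff below simply skip the ALU step on the high word, which is what the new is_mbpf_jmp64() checks gate.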
@@ -1334,8 +1334,9 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
insn->src_reg * 2 + 1, br_mask, insn->off);
if (is_mbpf_jmp64(meta))
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
insn->src_reg * 2 + 1, br_mask, insn->off);
return 0;
}
@@ -1390,13 +1391,15 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
else
emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
if (!code->swap)
emit_alu(nfp_prog, reg_none(),
reg_a(reg + 1), carry_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(),
tmp_reg, carry_op, reg_a(reg + 1));
if (is_mbpf_jmp64(meta)) {
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
if (!code->swap)
emit_alu(nfp_prog, reg_none(),
reg_a(reg + 1), carry_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(),
tmp_reg, carry_op, reg_a(reg + 1));
}
emit_br(nfp_prog, code->br_mask, insn->off, 0);
@@ -1423,8 +1426,9 @@ static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
emit_alu(nfp_prog, reg_none(),
reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
if (is_mbpf_jmp64(meta))
emit_alu(nfp_prog, reg_none(),
reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
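A note on the shared cmp paths above: cmp_imm()/cmp_reg() build a 64-bit compare out of the 32-bit ALU by doing a SUB on the low words followed by a SUB-with-carry on the high words, and the branch then tests the resulting condition codes; for JMP32 the high-word step is skipped. A rough C model of why that is enough (illustrative only, not NFP code):

#include <stdbool.h>
#include <stdint.h>

/* Unsigned "dst < imm" built from 32-bit pieces, roughly what the
 * SUB / SUB_C pair leaves in the condition codes.
 */
static bool ult64(uint64_t dst, uint64_t imm)
{
        uint32_t dst_lo = (uint32_t)dst, dst_hi = (uint32_t)(dst >> 32);
        uint32_t imm_lo = (uint32_t)imm, imm_hi = (uint32_t)(imm >> 32);

        /* the borrow from the low-word SUB feeds the high-word SUB_C */
        return dst_hi < imm_hi || (dst_hi == imm_hi && dst_lo < imm_lo);
}

static bool ult32(uint64_t dst, uint64_t imm)
{
        /* JMP32: only the low words take part in the compare */
        return (uint32_t)dst < (uint32_t)imm;
}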
@@ -3048,6 +3052,19 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
swreg tmp_reg;
tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog));
emit_alu(nfp_prog, reg_none(),
reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
emit_br(nfp_prog, BR_BEQ, insn->off, 0);
return 0;
}
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -3061,9 +3078,10 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
/* Upper word of the mask can only be 0 or ~0 from sign extension,
* so either ignore it or OR the whole thing in.
*/
if (imm >> 32)
if (is_mbpf_jmp64(meta) && imm >> 32) {
emit_alu(nfp_prog, reg_none(),
reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
}
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
@@ -3073,11 +3091,16 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
bool is_jmp32 = is_mbpf_jmp32(meta);
swreg tmp_reg;
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
if (is_jmp32)
emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE,
reg_b(insn->dst_reg * 2));
else
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
}
@@ -3087,6 +3110,9 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
emit_br(nfp_prog, BR_BNE, insn->off, 0);
if (is_jmp32)
return 0;
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
emit_alu(nfp_prog, reg_none(),
reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
@@ -3101,10 +3127,13 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
emit_alu(nfp_prog, reg_none(),
imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
if (is_mbpf_jmp64(meta)) {
emit_alu(nfp_prog, imm_b(nfp_prog),
reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR,
reg_b(insn->src_reg * 2 + 1));
emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR,
imm_b(nfp_prog));
}
emit_br(nfp_prog, BR_BEQ, insn->off, 0);
return 0;
@@ -3369,6 +3398,28 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm,
[BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JSGT | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JSGE | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JSLT | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JSLE | BPF_K] = cmp_imm,
[BPF_JMP32 | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg,
[BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JSGT | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JSGE | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JSLT | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP32 | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = jmp_exit,
};
@@ -3397,7 +3448,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
if (!is_mbpf_jmp(meta))
continue;
if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
!nfp_is_main_function(meta))
@@ -3758,16 +3809,14 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (BPF_CLASS(insn.code) != BPF_ALU &&
BPF_CLASS(insn.code) != BPF_ALU64 &&
BPF_CLASS(insn.code) != BPF_JMP)
if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
continue;
if (BPF_SRC(insn.code) != BPF_K)
continue;
if (insn.imm >= 0)
continue;
if (BPF_CLASS(insn.code) == BPF_JMP) {
if (is_mbpf_jmp(meta)) {
switch (BPF_OP(insn.code)) {
case BPF_JGE:
case BPF_JSGE:
@@ -4338,7 +4387,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
unsigned int dst_idx;
bool pseudo_call;
if (BPF_CLASS(code) != BPF_JMP)
if (!is_mbpf_jmp(meta))
continue;
if (BPF_OP(code) == BPF_EXIT)
continue;
......
@@ -365,6 +365,21 @@ static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}
static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
return mbpf_class(meta) == BPF_JMP32;
}
static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
return mbpf_class(meta) == BPF_JMP;
}
static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}
static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
@@ -419,10 +434,13 @@ static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
u8 op;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
if (is_mbpf_jmp32(meta))
return true;
if (!is_mbpf_jmp64(meta))
return false;
op = BPF_OP(meta->insn.code);
op = mbpf_op(meta);
return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}
......
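Finally, the new instr_cb[] entries are keyed by the full opcode byte, so it may help to see what a JMP32 instruction looks like on the wire. A small userspace sketch using the UAPI encoding (values are illustrative):

#include <linux/bpf.h>  /* BPF_JMP32, BPF_JEQ, BPF_K, struct bpf_insn */

/* "if ((u32)r1 == 42) goto pc + 2" -- dispatched to jeq32_imm() via the
 * [BPF_JMP32 | BPF_JEQ | BPF_K] entry in instr_cb[].
 */
static const struct bpf_insn example = {
        .code    = BPF_JMP32 | BPF_JEQ | BPF_K,
        .dst_reg = BPF_REG_1,
        .src_reg = 0,
        .off     = 2,
        .imm     = 42,
};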