Commit 77a3d311 authored by Jakub Kicinski, committed by Daniel Borkmann

nfp: bpf: add verification and codegen for map lookups

Verify our current constraints on the location of the key are
met and generate the code for calling map lookup on the datapath.

New relocation types have to be added - for helpers and return
addresses.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Parent ce4ebfd8
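
For context on what the new verifier checks accept: the key passed to bpf_map_lookup_elem() must be a stack pointer (PTR_TO_STACK) at a constant, 4-byte-aligned offset, and every re-parse of the same call instruction must see the same map (different stack offsets across paths are handled by reloading LM0). Below is a minimal BPF-side sketch of that pattern, not part of this commit; the map and program names are hypothetical, it uses today's libbpf bpf_helpers.h map-definition style rather than the style of this commit's era, and actually offloading it additionally requires firmware with map-lookup support.

/* Hypothetical XDP program illustrating the lookup shape these checks
 * accept: the key is a stack variable, so the second helper argument is
 * a constant, 4-byte-aligned PTR_TO_STACK.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 256);
        __type(key, __u32);
        __type(value, __u64);
} flow_drops SEC(".maps");              /* hypothetical map name */

SEC("xdp")
int xdp_lookup_example(struct xdp_md *ctx)
{
        __u32 key = 0;                  /* key built in a stack slot */
        __u64 *val;

        /* &key is PTR_TO_STACK with a constant, aligned offset */
        val = bpf_map_lookup_elem(&flow_drops, &key);
        if (val && *val)
                return XDP_DROP;

        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

On the nfp datapath, a call like this is compiled by map_lookup_stack() below: LM0 is pointed at the key and the code branches to the firmware's map_lookup helper via the new RELO_BR_HELPER relocation.
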
@@ -483,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
        }
}

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
               enum nfp_relo_type relo)
{
        if (imm > 0xffff) {
                pr_err("relocation of a large immediate!\n");
                nfp_prog->error = -EFAULT;
                return;
        }

        emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

        nfp_prog->prog[nfp_prog->prog_len - 1] |=
                FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough encode it directly in operand and return
 * otherwise load @imm to a spare register and return its encoding.
@@ -1279,6 +1294,56 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        return 0;
}

static int
map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        struct bpf_offloaded_map *offmap;
        struct nfp_bpf_map *nfp_map;
        bool load_lm_ptr;
        u32 ret_tgt;
        s64 lm_off;
        swreg tid;

        offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
        nfp_map = offmap->dev_priv;

        /* We only have to reload LM0 if the key is not at start of stack */
        lm_off = nfp_prog->stack_depth;
        lm_off += meta->arg2.var_off.value + meta->arg2.off;
        load_lm_ptr = meta->arg2_var_off || lm_off;

        /* Set LM0 to start of key */
        if (load_lm_ptr)
                emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);

        /* Load map ID into a register, it should actually fit as an immediate
         * but in case it doesn't deal with it here, not in the delay slots.
         */
        tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));

        emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
                     2, RELO_BR_HELPER);
        ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

        /* Load map ID into A0 */
        wrp_mov(nfp_prog, reg_a(0), tid);

        /* Load the return address into B0 */
        wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

        if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
                return -EINVAL;

        /* Reset the LM0 pointer */
        if (!load_lm_ptr)
                return 0;

        emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
        wrp_nops(nfp_prog, 3);

        return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -2058,6 +2123,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        switch (meta->insn.imm) {
        case BPF_FUNC_xdp_adjust_head:
                return adjust_head(nfp_prog, meta);
        case BPF_FUNC_map_lookup_elem:
                return map_lookup_stack(nfp_prog, meta);
        default:
                WARN_ONCE(1, "verifier allowed unsupported function\n");
                return -EOPNOTSUPP;
@@ -2794,6 +2861,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
        for (i = 0; i < nfp_prog->prog_len; i++) {
                enum nfp_relo_type special;
                u32 val;

                special = FIELD_GET(OP_RELO_TYPE, prog[i]);
                switch (special) {
@@ -2813,6 +2881,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
                case RELO_BR_NEXT_PKT:
                        br_set_offset(&prog[i], bv->tgt_done);
                        break;
                case RELO_BR_HELPER:
                        val = br_get_offset(prog[i]);
                        val -= BR_OFF_RELO;
                        switch (val) {
                        case BPF_FUNC_map_lookup_elem:
                                val = nfp_prog->bpf->helpers.map_lookup;
                                break;
                        default:
                                pr_err("relocation of unknown helper %d\n",
                                       val);
                                err = -EINVAL;
                                goto err_free_prog;
                        }
                        br_set_offset(&prog[i], val);
                        break;
                case RELO_IMMED_REL:
                        immed_add_value(&prog[i], bv->start_off);
                        break;
                }

                prog[i] &= ~OP_RELO_TYPE;
@@ -60,6 +60,9 @@ enum nfp_relo_type {
        RELO_BR_GO_ABORT,
        /* external jumps to fixed addresses */
        RELO_BR_NEXT_PKT,
        RELO_BR_HELPER,

        /* immediate relocation against load address */
        RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
@@ -191,9 +194,12 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
* @ptr: pointer type for memory operations
* @ldst_gather_len: memcpy length gathered from load/store sequence
* @paired_st: the paired store insn at the head of the sequence
* @arg2: arg2 for call instructions
* @ptr_not_const: pointer is not always constant
* @jmp_dst: destination info for jump instructions
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
* @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -211,7 +217,12 @@ struct nfp_insn_meta {
                        bool ptr_not_const;
                };
                struct nfp_insn_meta *jmp_dst;
                struct {
                        u32 func_id;
                        struct bpf_reg_state arg1;
                        struct bpf_reg_state arg2;
                        bool arg2_var_off;
                };
        };
        unsigned int off;
        unsigned short n;
@@ -110,9 +110,11 @@ static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
                   struct nfp_insn_meta *meta)
{
        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
        const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
        struct nfp_app_bpf *bpf = nfp_prog->bpf;
        u32 func_id = meta->insn.imm;
        s64 off, old_off;

        switch (func_id) {
        case BPF_FUNC_xdp_adjust_head:
@@ -127,11 +129,48 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
                nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
                break;

        case BPF_FUNC_map_lookup_elem:
                if (!bpf->helpers.map_lookup) {
                        pr_info("map_lookup: not supported by FW\n");
                        return -EOPNOTSUPP;
                }
                if (reg2->type != PTR_TO_STACK) {
                        pr_info("map_lookup: unsupported key ptr type %d\n",
                                reg2->type);
                        return -EOPNOTSUPP;
                }
                if (!tnum_is_const(reg2->var_off)) {
                        pr_info("map_lookup: variable key pointer\n");
                        return -EOPNOTSUPP;
                }

                off = reg2->var_off.value + reg2->off;
                if (-off % 4) {
                        pr_info("map_lookup: unaligned stack pointer %lld\n",
                                -off);
                        return -EOPNOTSUPP;
                }

                /* Rest of the checks is only if we re-parse the same insn */
                if (!meta->func_id)
                        break;

                old_off = meta->arg2.var_off.value + meta->arg2.off;
                meta->arg2_var_off |= off != old_off;

                if (meta->arg1.map_ptr != reg1->map_ptr) {
                        pr_info("map_lookup: called for different map\n");
                        return -EOPNOTSUPP;
                }
                break;
        default:
                pr_vlog(env, "unsupported function id: %d\n", func_id);
                return -EOPNOTSUPP;
        }

        meta->func_id = func_id;
        meta->arg1 = *reg1;
        meta->arg2 = *reg2;

        return 0;