提交 71189fa9 编写于 作者: A Alexei Starovoitov 提交者: David S. Miller

bpf: free up BPF_JMP | BPF_CALL | BPF_X opcode

free up BPF_JMP | BPF_CALL | BPF_X opcode to be used by actual
indirect call by register and use kernel internal opcode to
mark call instruction into bpf_tail_call() helper.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 d2e0ef49
...@@ -586,7 +586,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) ...@@ -586,7 +586,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break; break;
} }
/* tail call */ /* tail call */
case BPF_JMP | BPF_CALL | BPF_X: case BPF_JMP | BPF_TAIL_CALL:
if (emit_bpf_tail_call(ctx)) if (emit_bpf_tail_call(ctx))
return -EFAULT; return -EFAULT;
break; break;
......
...@@ -938,7 +938,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, ...@@ -938,7 +938,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
/* /*
* Tail call * Tail call
*/ */
case BPF_JMP | BPF_CALL | BPF_X: case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL; ctx->seen |= SEEN_TAILCALL;
bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
break; break;
......
...@@ -991,7 +991,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i ...@@ -991,7 +991,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
} }
break; break;
} }
case BPF_JMP | BPF_CALL | BPF_X: case BPF_JMP | BPF_TAIL_CALL:
/* /*
* Implicit input: * Implicit input:
* B1: pointer to ctx * B1: pointer to ctx
......
...@@ -1217,7 +1217,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) ...@@ -1217,7 +1217,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
} }
/* tail call */ /* tail call */
case BPF_JMP | BPF_CALL |BPF_X: case BPF_JMP | BPF_TAIL_CALL:
emit_tail_call(ctx); emit_tail_call(ctx);
break; break;
......
...@@ -877,7 +877,7 @@ xadd: if (is_imm8(insn->off)) ...@@ -877,7 +877,7 @@ xadd: if (is_imm8(insn->off))
} }
break; break;
case BPF_JMP | BPF_CALL | BPF_X: case BPF_JMP | BPF_TAIL_CALL:
emit_bpf_tail_call(&prog); emit_bpf_tail_call(&prog);
break; break;
......
...@@ -57,6 +57,9 @@ struct bpf_prog_aux; ...@@ -57,6 +57,9 @@ struct bpf_prog_aux;
#define BPF_REG_AX MAX_BPF_REG #define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
/* As per nm, we expose JITed images as text (code) section for /* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match * kallsyms. That way, tools like perf can find it to match
* addresses. * addresses.
......
...@@ -824,7 +824,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) ...@@ -824,7 +824,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG, [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
/* Call instruction */ /* Call instruction */
[BPF_JMP | BPF_CALL] = &&JMP_CALL, [BPF_JMP | BPF_CALL] = &&JMP_CALL,
[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL, [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
/* Jumps */ /* Jumps */
[BPF_JMP | BPF_JA] = &&JMP_JA, [BPF_JMP | BPF_JA] = &&JMP_JA,
[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X, [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
......
...@@ -3469,7 +3469,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) ...@@ -3469,7 +3469,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
* that doesn't support bpf_tail_call yet * that doesn't support bpf_tail_call yet
*/ */
insn->imm = 0; insn->imm = 0;
insn->code |= BPF_X; insn->code = BPF_JMP | BPF_TAIL_CALL;
continue; continue;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册