提交 aafe6ae9 编写于 作者: D Daniel Borkmann 提交者: David S. Miller

bpf: dynamically allocate digest scratch buffer

Geert rightfully complained that 7bd509e3 ("bpf: add prog_digest
and expose it via fdinfo/netlink") added a too large allocation of
variable 'raw' from bss section, and should instead be done dynamically:

  # ./scripts/bloat-o-meter kernel/bpf/core.o.1 kernel/bpf/core.o.2
  add/remove: 3/0 grow/shrink: 0/0 up/down: 33291/0 (33291)
  function                                     old     new   delta
  raw                                            -   32832  +32832
  [...]

Since this is only relevant during the program creation path, which can be
considered slow-path anyway, let's allocate that dynamically and not be
implicitly dependent on the verifier mutex. Move bpf_prog_calc_digest() to
the beginning of replace_map_fd_with_map_ptr() so that error handling also
stays straightforward.
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 40e972ab
...@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); ...@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
void bpf_prog_calc_digest(struct bpf_prog *fp); int bpf_prog_calc_digest(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
......
...@@ -57,9 +57,6 @@ struct bpf_prog_aux; ...@@ -57,9 +57,6 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */ /* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512 #define MAX_BPF_STACK 512
/* Maximum BPF program size in bytes. */
#define MAX_BPF_SIZE (BPF_MAXINSNS * sizeof(struct bpf_insn))
/* Helper macros for filter block array initializers. */ /* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
...@@ -517,6 +514,17 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, ...@@ -517,6 +514,17 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
return BPF_PROG_RUN(prog, xdp); return BPF_PROG_RUN(prog, xdp);
} }
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
}
static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}
static inline unsigned int bpf_prog_size(unsigned int proglen) static inline unsigned int bpf_prog_size(unsigned int proglen)
{ {
return max(sizeof(struct bpf_prog), return max(sizeof(struct bpf_prog),
......
...@@ -136,28 +136,29 @@ void __bpf_prog_free(struct bpf_prog *fp) ...@@ -136,28 +136,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp); vfree(fp);
} }
#define SHA_BPF_RAW_SIZE \ int bpf_prog_calc_digest(struct bpf_prog *fp)
round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
/* Called under verifier mutex. */
void bpf_prog_calc_digest(struct bpf_prog *fp)
{ {
const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64); const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
static u32 ws[SHA_WORKSPACE_WORDS]; u32 raw_size = bpf_prog_digest_scratch_size(fp);
static u8 raw[SHA_BPF_RAW_SIZE]; u32 ws[SHA_WORKSPACE_WORDS];
struct bpf_insn *dst = (void *)raw;
u32 i, bsize, psize, blocks; u32 i, bsize, psize, blocks;
struct bpf_insn *dst;
bool was_ld_map; bool was_ld_map;
u8 *todo = raw; u8 *raw, *todo;
__be32 *result; __be32 *result;
__be64 *bits; __be64 *bits;
raw = vmalloc(raw_size);
if (!raw)
return -ENOMEM;
sha_init(fp->digest); sha_init(fp->digest);
memset(ws, 0, sizeof(ws)); memset(ws, 0, sizeof(ws));
/* We need to take out the map fd for the digest calculation /* We need to take out the map fd for the digest calculation
* since they are unstable from user space side. * since they are unstable from user space side.
*/ */
dst = (void *)raw;
for (i = 0, was_ld_map = false; i < fp->len; i++) { for (i = 0, was_ld_map = false; i < fp->len; i++) {
dst[i] = fp->insnsi[i]; dst[i] = fp->insnsi[i];
if (!was_ld_map && if (!was_ld_map &&
...@@ -177,12 +178,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp) ...@@ -177,12 +178,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
} }
} }
psize = fp->len * sizeof(struct bpf_insn); psize = bpf_prog_insn_size(fp);
memset(&raw[psize], 0, sizeof(raw) - psize); memset(&raw[psize], 0, raw_size - psize);
raw[psize++] = 0x80; raw[psize++] = 0x80;
bsize = round_up(psize, SHA_MESSAGE_BYTES); bsize = round_up(psize, SHA_MESSAGE_BYTES);
blocks = bsize / SHA_MESSAGE_BYTES; blocks = bsize / SHA_MESSAGE_BYTES;
todo = raw;
if (bsize - psize >= sizeof(__be64)) { if (bsize - psize >= sizeof(__be64)) {
bits = (__be64 *)(todo + bsize - sizeof(__be64)); bits = (__be64 *)(todo + bsize - sizeof(__be64));
} else { } else {
...@@ -199,6 +201,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp) ...@@ -199,6 +201,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
result = (__force __be32 *)fp->digest; result = (__force __be32 *)fp->digest;
for (i = 0; i < SHA_DIGEST_WORDS; i++) for (i = 0; i < SHA_DIGEST_WORDS; i++)
result[i] = cpu_to_be32(fp->digest[i]); result[i] = cpu_to_be32(fp->digest[i]);
vfree(raw);
return 0;
} }
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn) static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
......
...@@ -811,7 +811,7 @@ static int bpf_prog_load(union bpf_attr *attr) ...@@ -811,7 +811,7 @@ static int bpf_prog_load(union bpf_attr *attr)
err = -EFAULT; err = -EFAULT;
if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
prog->len * sizeof(struct bpf_insn)) != 0) bpf_prog_insn_size(prog)) != 0)
goto free_prog; goto free_prog;
prog->orig_prog = NULL; prog->orig_prog = NULL;
......
...@@ -2931,6 +2931,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) ...@@ -2931,6 +2931,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
int insn_cnt = env->prog->len; int insn_cnt = env->prog->len;
int i, j, err; int i, j, err;
err = bpf_prog_calc_digest(env->prog);
if (err)
return err;
for (i = 0; i < insn_cnt; i++, insn++) { for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX && if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
...@@ -3178,8 +3182,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) ...@@ -3178,8 +3182,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
log_level = 0; log_level = 0;
} }
bpf_prog_calc_digest(env->prog);
ret = replace_map_fd_with_map_ptr(env); ret = replace_map_fd_with_map_ptr(env);
if (ret < 0) if (ret < 0)
goto skip_full_check; goto skip_full_check;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册