Commit 3ad00405 authored by Daniel Borkmann, committed by David S. Miller

bpf: split state from prandom_u32() and consolidate {c, e}BPF prngs

While recently arguing in a seccomp discussion that raw prandom_u32()
access shouldn't be exposed to unprivileged user space, I forgot the
fact that the SKF_AD_RANDOM extension has actually already been doing
exactly that for some time in cBPF, via commit 4cd3675e ("filter: added
BPF random opcode").
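
For illustration only (not part of this patch), a minimal user space
snippet along these lines shows how the extension hands raw PRNG words
to an unprivileged socket filter; the attach step is sketched in the
trailing comment:

  #include <linux/filter.h>
  #include <sys/socket.h>

  static struct sock_filter insns[] = {
  	/* A <- 32-bit word from the kernel PRNG (ancillary load) */
  	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_RANDOM),
  	/* return A: the raw PRNG word becomes the filter verdict and
  	 * thus observably leaks the sequence to user space
  	 */
  	BPF_STMT(BPF_RET | BPF_A, 0),
  };

  static struct sock_fprog prog = {
  	.len	= sizeof(insns) / sizeof(insns[0]),
  	.filter	= insns,
  };

  /* setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)); */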

Since prandom_u32() is used in a lot of critical networking code, let's
be more conservative and split their states. Furthermore, consolidate
the eBPF and cBPF prandom handlers to use the new internal PRNG. For
eBPF, bpf_get_prandom_u32() was only accessible to privileged users, but
should that change one day, we also don't want to leak raw sequences
through things like eBPF maps.

One thought was also to give each bpf_prog its own state, but for ABI
reasons this is not easily possible, i.e. the program code currently
cannot access bpf_prog itself, and copying the rnd_state to/from the
stack scratch space whenever a program uses the prng does not really
seem worth the trouble and looks too hacky. If needed, taus113 could in
such cases be implemented within eBPF, using a map entry to keep the
state space (see the sketch below), or get_random_bytes() could become
a second helper where performance is not critical.
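
Should anyone ever need that, a rough sketch of the map-based variant
could look as follows. This is purely illustrative: it assumes the
samples/bpf-style SEC()/map conventions, user space pre-seeding the map
entry with proper taus113 seeds, and made-up names (rnd_state,
taus113_next); the recurrences and constants mirror prandom_u32_state()
in lib/random32.c:

  #include <linux/bpf.h>
  #include "bpf_helpers.h"	/* samples/bpf-style SEC()/map helpers, assumed */

  #define TAUSWORTHE(s, a, b, c, d) \
  	((((s) & (c)) << (d)) ^ ((((s) << (a)) ^ (s)) >> (b)))

  struct taus113_state {
  	__u32 s1, s2, s3, s4;
  };

  /* single-slot array map holding the private PRNG state space */
  struct bpf_map_def SEC("maps") rnd_state = {
  	.type		= BPF_MAP_TYPE_ARRAY,
  	.key_size	= sizeof(__u32),
  	.value_size	= sizeof(struct taus113_state),
  	.max_entries	= 1,
  };

  /* same recurrences and constants as prandom_u32_state() */
  static __always_inline __u32 taus113_next(struct taus113_state *s)
  {
  	s->s1 = TAUSWORTHE(s->s1,  6, 13, 4294967294U, 18);
  	s->s2 = TAUSWORTHE(s->s2,  2, 27, 4294967288U,  2);
  	s->s3 = TAUSWORTHE(s->s3, 13, 21, 4294967280U,  7);
  	s->s4 = TAUSWORTHE(s->s4,  3, 12, 4294967168U, 13);
  	return s->s1 ^ s->s2 ^ s->s3 ^ s->s4;
  }

  SEC("socket")
  int prog_with_private_prng(struct __sk_buff *skb)
  {
  	__u32 key = 0;
  	struct taus113_state *s;

  	s = bpf_map_lookup_elem(&rnd_state, &key);
  	if (!s)
  		return 0;
  	/* keep (-1 = full packet) or drop (0) based on one PRNG bit */
  	return taus113_next(s) & 1 ? -1 : 0;
  }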

Both sides can trigger a one-time late init via prandom_init_once() on
the shared state. Performance-wise, there should even be a tiny gain
as bpf_user_rnd_u32() saves one function call. The PRNG needs to live
inside the BPF core since kernels could have a NET-less config as well.
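
For reference, prandom_init_once() is essentially the following
DO_ONCE()-based one-liner (sketched from linux/random.h and
linux/once.h; exact definition assumed), so the full seeding runs at
most once per state no matter which side triggers it first:

  #define prandom_init_once(pcpu_state) \
  	DO_ONCE(prandom_seed_full_state, (pcpu_state))
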
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Cc: Chema Gonzalez <chema@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 897ece56
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -200,4 +200,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto;
 extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
 extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
+/* Shared helpers among cBPF and eBPF. */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
 #endif /* _LINUX_BPF_H */
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -731,6 +731,32 @@ void bpf_prog_free(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
 
+/* RNG for unprivileged user space with separated state from prandom_u32(). */
+static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
+
+void bpf_user_rnd_init_once(void)
+{
+	prandom_init_once(&bpf_user_rnd_state);
+}
+
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	/* Should someone ever have the rather unwise idea to use some
+	 * of the registers passed into this function, then note that
+	 * this function is called from native eBPF and classic-to-eBPF
+	 * transformations. Register assignments from both sides are
+	 * different, f.e. classic always sets fn(ctx, A, X) here.
+	 */
+	struct rnd_state *state;
+	u32 res;
+
+	state = &get_cpu_var(bpf_user_rnd_state);
+	res = prandom_u32_state(state);
+	put_cpu_var(state);
+
+	return res;
+}
+
 /* Weak definitions of helper functions in case we don't have bpf syscall. */
 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -93,13 +93,8 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = {
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
-static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
-	.func		= bpf_get_prandom_u32,
+	.func		= bpf_user_rnd_u32,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 };
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -404,6 +404,8 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
 		if (insn->imm == BPF_FUNC_get_route_realm)
 			prog->dst_needed = 1;
+		if (insn->imm == BPF_FUNC_get_prandom_u32)
+			bpf_user_rnd_init_once();
 		if (insn->imm == BPF_FUNC_tail_call) {
 			/* mark bpf_tail_call as different opcode
 			 * to avoid conditional branch in
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -149,12 +149,6 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 	return raw_smp_processor_id();
 }
 
-/* note that this only generates 32-bit random numbers */
-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 			      struct bpf_insn *insn_buf)
 {
@@ -313,7 +307,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
 		break;
 	case SKF_AD_OFF + SKF_AD_RANDOM:
-		*insn = BPF_EMIT_CALL(__get_random_u32);
+		*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
+		bpf_user_rnd_init_once();
 		break;
 	}
 	break;