Commit b082af73 authored by David S. Miller

Merge branch 'bpf-ctx-info-out-of-verifier'

Jakub Kicinski says:

====================
bpf: move context info out of the verifier

Daniel pointed out during the review of my previous patchset that
the knowledge about context doesn't really belong directly in the
verifier.  This patch set takes a bit of a drastic approach to
move the info out of there.  I want to be able to use a different
set of verifier_ops for program analysis.  To do that, I have
to first move the test_run callback to a separate structure.  Then
verifier ops can be declared in the verifier directly and
different sets can be picked for verification vs analysis.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
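
To make the end state concrete before reading the diff: the series splits the old all-in-one ops structure into a runtime half (bpf_prog_ops, holding test_run) and a verification half (bpf_verifier_ops), and each half gets its own type-indexed table generated from the same BPF_PROG_TYPE() list. Below is a minimal, compilable toy model of that X-macro pattern; every name in it is an illustrative stand-in, not the kernel's actual code.

#include <stdio.h>

struct prog_ops     { const char *(*test_run)(void); };
struct verifier_ops { int (*is_valid_access)(int off); };

enum prog_type { PROG_TYPE_XDP, PROG_TYPE_SCHED_CLS, __MAX_PROG_TYPE };

/* One list drives every table, as bpf_types.h does after this series. */
#define PROG_TYPES(X)			\
	X(PROG_TYPE_XDP, xdp)		\
	X(PROG_TYPE_SCHED_CLS, tc_cls_act)

static const char *xdp_test_run(void)          { return "ran xdp"; }
static const char *tc_cls_act_test_run(void)   { return "ran tc"; }
static int xdp_is_valid_access(int off)        { return off == 0 || off == 4; }
static int tc_cls_act_is_valid_access(int off) { return off >= 0; }

static const struct prog_ops xdp_prog_ops            = { xdp_test_run };
static const struct prog_ops tc_cls_act_prog_ops     = { tc_cls_act_test_run };
static const struct verifier_ops xdp_verifier_ops        = { xdp_is_valid_access };
static const struct verifier_ops tc_cls_act_verifier_ops = { tc_cls_act_is_valid_access };

/* Token pasting picks the _prog_ops or _verifier_ops variant per table. */
#define ENTRY(_id, _name) [_id] = &_name ## _prog_ops,
static const struct prog_ops *const prog_ops_tbl[__MAX_PROG_TYPE] = { PROG_TYPES(ENTRY) };
#undef ENTRY

#define ENTRY(_id, _name) [_id] = &_name ## _verifier_ops,
static const struct verifier_ops *const verifier_ops_tbl[__MAX_PROG_TYPE] = { PROG_TYPES(ENTRY) };
#undef ENTRY

int main(void)
{
	enum prog_type t = PROG_TYPE_XDP;

	/* Runtime and verification now consult separate tables. */
	printf("%s, valid(4)=%d\n", prog_ops_tbl[t]->test_run(),
	       verifier_ops_tbl[t]->is_valid_access(4));
	return 0;
}

The point of the split shows in main(): the runtime path and the analysis path can now be handed different ops for the same program type.
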
@@ -157,6 +157,11 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
 	aux->ctx_field_size = size;
 }
 
+struct bpf_prog_ops {
+	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+			union bpf_attr __user *uattr);
+};
+
 struct bpf_verifier_ops {
 	/* return eBPF function prototype for verification */
 	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -172,8 +177,6 @@ struct bpf_verifier_ops {
 			const struct bpf_insn *src,
 			struct bpf_insn *dst,
 			struct bpf_prog *prog, u32 *target_size);
-	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
-			union bpf_attr __user *uattr);
 };
 
 struct bpf_prog_aux {
@@ -184,7 +187,7 @@ struct bpf_prog_aux {
 	u32 id;
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
-	const struct bpf_verifier_ops *ops;
+	const struct bpf_prog_ops *ops;
 	struct bpf_map **used_maps;
 	struct bpf_prog *prog;
 	struct user_struct *user;
@@ -279,14 +282,18 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 
-#define BPF_PROG_TYPE(_id, _ops) \
-	extern const struct bpf_verifier_ops _ops;
+#define BPF_PROG_TYPE(_id, _name) \
+	extern const struct bpf_prog_ops _name ## _prog_ops; \
+	extern const struct bpf_verifier_ops _name ## _verifier_ops;
 #define BPF_MAP_TYPE(_id, _ops) \
 	extern const struct bpf_map_ops _ops;
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
 #undef BPF_MAP_TYPE
 
+extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
+extern const struct bpf_verifier_ops xdp_analyzer_ops;
+
 struct bpf_prog *bpf_prog_get(u32 ufd);
 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
 struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
......
 /* internal file - do not include directly */
 
 #ifdef CONFIG_NET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
 #endif
 #ifdef CONFIG_BPF_EVENTS
-BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
 #endif
 
 BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
......
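
As a worked example, hand-expanding the entry BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) under each definition of the macro used in this series gives:

/* under the header's declaration macro: */
extern const struct bpf_prog_ops xdp_prog_ops;
extern const struct bpf_verifier_ops xdp_verifier_ops;

/* under the syscall-side table macro: */
[BPF_PROG_TYPE_XDP] = & xdp_prog_ops,

/* under the verifier-side table macro (added further down): */
[BPF_PROG_TYPE_XDP] = & xdp_verifier_ops,
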
@@ -141,6 +141,7 @@ struct bpf_ext_analyzer_ops {
  */
 struct bpf_verifier_env {
 	struct bpf_prog *prog;		/* eBPF program being verified */
+	const struct bpf_verifier_ops *ops;
 	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
 	int stack_size;			/* number of states to be processed */
 	bool strict_alignment;		/* perform strict pointer alignment checks */
......
@@ -739,9 +739,9 @@ static int map_get_next_key(union bpf_attr *attr)
 	return err;
 }
 
-static const struct bpf_verifier_ops * const bpf_prog_types[] = {
-#define BPF_PROG_TYPE(_id, _ops) \
-	[_id] = &_ops,
+static const struct bpf_prog_ops * const bpf_prog_types[] = {
+#define BPF_PROG_TYPE(_id, _name) \
+	[_id] = & _name ## _prog_ops,
 #define BPF_MAP_TYPE(_id, _ops)
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
......
@@ -23,6 +23,15 @@
 
 #include "disasm.h"
 
+static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
+#define BPF_PROG_TYPE(_id, _name) \
+	[_id] = & _name ## _verifier_ops,
+#define BPF_MAP_TYPE(_id, _ops)
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
+};
+
 /* bpf_check() is a static code analyzer that walks eBPF program
  * instruction by instruction and updates register/stack state.
  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
@@ -813,36 +822,6 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 	return err;
 }
 
-static bool analyzer_is_valid_access(struct bpf_verifier_env *env, int off,
-				     struct bpf_insn_access_aux *info)
-{
-	switch (env->prog->type) {
-	case BPF_PROG_TYPE_XDP:
-		switch (off) {
-		case offsetof(struct xdp_buff, data):
-			info->reg_type = PTR_TO_PACKET;
-			return true;
-		case offsetof(struct xdp_buff, data_end):
-			info->reg_type = PTR_TO_PACKET_END;
-			return true;
-		}
-		return false;
-	case BPF_PROG_TYPE_SCHED_CLS:
-		switch (off) {
-		case offsetof(struct sk_buff, data):
-			info->reg_type = PTR_TO_PACKET;
-			return true;
-		case offsetof(struct sk_buff, cb) +
-		     offsetof(struct bpf_skb_data_end, data_end):
-			info->reg_type = PTR_TO_PACKET_END;
-			return true;
-		}
-		return false;
-	default:
-		return false;
-	}
-}
-
 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
@@ -851,13 +830,8 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 		.reg_type = *reg_type,
 	};
 
-	if (env->analyzer_ops) {
-		if (analyzer_is_valid_access(env, off, &info)) {
-			*reg_type = info.reg_type;
-			return 0;
-		}
-	} else if (env->prog->aux->ops->is_valid_access &&
-		   env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
+	if (env->ops->is_valid_access &&
+	    env->ops->is_valid_access(off, size, t, &info)) {
 		/* A non zero info.ctx_field_size indicates that this field is a
 		 * candidate for later verifier transformation to load the whole
 		 * field and then apply a mask when accessed with a narrower
@@ -865,9 +839,12 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 		 * will only allow for whole field access and rejects any other
 		 * type of narrower access.
 		 */
-		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		*reg_type = info.reg_type;
 
+		if (env->analyzer_ops)
+			return 0;
+
+		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		/* remember the offset of last byte accessed in ctx */
 		if (env->prog->aux->max_ctx_offset < off + size)
 			env->prog->aux->max_ctx_offset = off + size;
@@ -1565,8 +1542,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 		return -EINVAL;
 	}
 
-	if (env->prog->aux->ops->get_func_proto)
-		fn = env->prog->aux->ops->get_func_proto(func_id);
+	if (env->ops->get_func_proto)
+		fn = env->ops->get_func_proto(func_id);
 
 	if (!fn) {
 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
@@ -4035,7 +4012,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
  */
 static int convert_ctx_accesses(struct bpf_verifier_env *env)
 {
-	const struct bpf_verifier_ops *ops = env->prog->aux->ops;
+	const struct bpf_verifier_ops *ops = env->ops;
 	int i, cnt, size, ctx_field_size, delta = 0;
 	const int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16], *insn;
@@ -4236,7 +4213,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			insn      = new_prog->insnsi + i + delta;
 		}
 patch_call_imm:
-		fn = prog->aux->ops->get_func_proto(insn->imm);
+		fn = env->ops->get_func_proto(insn->imm);
 		/* all functions that have prototype and verifier allowed
 		 * programs to call them, must be real in-kernel functions
 		 */
@@ -4294,6 +4271,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!env->insn_aux_data)
 		goto err_free_env;
 	env->prog = *prog;
+	env->ops = bpf_verifier_ops[env->prog->type];
 
 	/* grab the mutex to protect few globals used by verifier */
 	mutex_lock(&bpf_verifier_lock);
@@ -4390,12 +4368,21 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	return ret;
 }
 
+static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
+	[BPF_PROG_TYPE_XDP]		= &xdp_analyzer_ops,
+	[BPF_PROG_TYPE_SCHED_CLS]	= &tc_cls_act_analyzer_ops,
+};
+
 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 		 void *priv)
 {
 	struct bpf_verifier_env *env;
 	int ret;
 
+	if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
+	    !bpf_analyzer_ops[prog->type])
+		return -EOPNOTSUPP;
+
 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
 	if (!env)
 		return -ENOMEM;
@@ -4406,6 +4393,7 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 	if (!env->insn_aux_data)
 		goto err_free_env;
 	env->prog = prog;
+	env->ops = bpf_analyzer_ops[env->prog->type];
 	env->analyzer_ops = ops;
 	env->analyzer_priv = priv;
......
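
The bpf_analyzer() changes above carry the user-visible behavior: analysis now flows through the same is_valid_access machinery as verification, but with a dedicated, sparsely populated ops table, and any program type without an entry is rejected with -EOPNOTSUPP. A self-contained toy sketch of that sparse-table guard (illustrative names only, not kernel code):

#include <errno.h>
#include <stdio.h>

enum prog_type { PROG_TYPE_SOCKET_FILTER, PROG_TYPE_SCHED_CLS, PROG_TYPE_XDP };

struct verifier_ops { const char *name; };

static const struct verifier_ops xdp_analyzer_ops        = { "xdp" };
static const struct verifier_ops tc_cls_act_analyzer_ops = { "tc_cls_act" };

/* Sparse table: only the types an analyzer supports have entries. */
static const struct verifier_ops *const analyzer_ops[] = {
	[PROG_TYPE_XDP]       = &xdp_analyzer_ops,
	[PROG_TYPE_SCHED_CLS] = &tc_cls_act_analyzer_ops,
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int analyze(enum prog_type type)
{
	/* Mirrors the guard added to bpf_analyzer() above. */
	if (type >= ARRAY_SIZE(analyzer_ops) || !analyzer_ops[type])
		return -EOPNOTSUPP;
	printf("analyzing with %s ops\n", analyzer_ops[type]->name);
	return 0;
}

int main(void)
{
	analyze(PROG_TYPE_XDP);                           /* supported */
	printf("%d\n", analyze(PROG_TYPE_SOCKET_FILTER)); /* -EOPNOTSUPP */
	return 0;
}
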
@@ -561,11 +561,14 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
 	return true;
 }
 
-const struct bpf_verifier_ops kprobe_prog_ops = {
+const struct bpf_verifier_ops kprobe_verifier_ops = {
 	.get_func_proto  = kprobe_prog_func_proto,
 	.is_valid_access = kprobe_prog_is_valid_access,
 };
 
+const struct bpf_prog_ops kprobe_prog_ops = {
+};
+
 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
 	   u64, flags, void *, data, u64, size)
 {
@@ -667,11 +670,14 @@ static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type
 	return true;
 }
 
-const struct bpf_verifier_ops tracepoint_prog_ops = {
+const struct bpf_verifier_ops tracepoint_verifier_ops = {
 	.get_func_proto  = tp_prog_func_proto,
 	.is_valid_access = tp_prog_is_valid_access,
 };
 
+const struct bpf_prog_ops tracepoint_prog_ops = {
+};
+
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -727,8 +733,11 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 	return insn - insn_buf;
 }
 
-const struct bpf_verifier_ops perf_event_prog_ops = {
+const struct bpf_verifier_ops perf_event_verifier_ops = {
 	.get_func_proto		= tp_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
+
+const struct bpf_prog_ops perf_event_prog_ops = {
+};
@@ -3732,6 +3732,25 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	return bpf_skb_is_valid_access(off, size, type, info);
 }
 
+static bool
+tc_cls_act_is_valid_access_analyzer(int off, int size,
+				    enum bpf_access_type type,
+				    struct bpf_insn_access_aux *info)
+{
+	switch (off) {
+	case offsetof(struct sk_buff, len):
+		return true;
+	case offsetof(struct sk_buff, data):
+		info->reg_type = PTR_TO_PACKET;
+		return true;
+	case offsetof(struct sk_buff, cb) +
+	     offsetof(struct bpf_skb_data_end, data_end):
+		info->reg_type = PTR_TO_PACKET_END;
+		return true;
+	}
+	return false;
+}
+
 static bool __is_valid_xdp_access(int off, int size)
 {
 	if (off < 0 || off >= sizeof(struct xdp_md))
@@ -3766,6 +3785,21 @@ static bool xdp_is_valid_access(int off, int size,
 	return __is_valid_xdp_access(off, size);
 }
 
+static bool xdp_is_valid_access_analyzer(int off, int size,
+					 enum bpf_access_type type,
+					 struct bpf_insn_access_aux *info)
+{
+	switch (off) {
+	case offsetof(struct xdp_buff, data):
+		info->reg_type = PTR_TO_PACKET;
+		return true;
+	case offsetof(struct xdp_buff, data_end):
+		info->reg_type = PTR_TO_PACKET_END;
+		return true;
+	}
+	return false;
+}
+
 void bpf_warn_invalid_xdp_action(u32 act)
 {
 	const u32 act_max = XDP_REDIRECT;
@@ -4395,68 +4429,103 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 	return insn - insn_buf;
 }
 
-const struct bpf_verifier_ops sk_filter_prog_ops = {
+const struct bpf_verifier_ops sk_filter_verifier_ops = {
 	.get_func_proto		= sk_filter_func_proto,
 	.is_valid_access	= sk_filter_is_valid_access,
 	.convert_ctx_access	= bpf_convert_ctx_access,
 };
 
-const struct bpf_verifier_ops tc_cls_act_prog_ops = {
+const struct bpf_prog_ops sk_filter_prog_ops = {
+};
+
+const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
 	.get_func_proto		= tc_cls_act_func_proto,
 	.is_valid_access	= tc_cls_act_is_valid_access,
 	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
 	.gen_prologue		= tc_cls_act_prologue,
+};
+
+const struct bpf_verifier_ops tc_cls_act_analyzer_ops = {
+	.is_valid_access	= tc_cls_act_is_valid_access_analyzer,
+};
+
+const struct bpf_prog_ops tc_cls_act_prog_ops = {
 	.test_run		= bpf_prog_test_run_skb,
 };
 
-const struct bpf_verifier_ops xdp_prog_ops = {
+const struct bpf_verifier_ops xdp_verifier_ops = {
 	.get_func_proto		= xdp_func_proto,
 	.is_valid_access	= xdp_is_valid_access,
 	.convert_ctx_access	= xdp_convert_ctx_access,
+};
+
+const struct bpf_verifier_ops xdp_analyzer_ops = {
+	.is_valid_access	= xdp_is_valid_access_analyzer,
+};
+
+const struct bpf_prog_ops xdp_prog_ops = {
 	.test_run		= bpf_prog_test_run_xdp,
 };
 
-const struct bpf_verifier_ops cg_skb_prog_ops = {
+const struct bpf_verifier_ops cg_skb_verifier_ops = {
 	.get_func_proto		= sk_filter_func_proto,
 	.is_valid_access	= sk_filter_is_valid_access,
 	.convert_ctx_access	= bpf_convert_ctx_access,
+};
+
+const struct bpf_prog_ops cg_skb_prog_ops = {
 	.test_run		= bpf_prog_test_run_skb,
 };
 
-const struct bpf_verifier_ops lwt_inout_prog_ops = {
+const struct bpf_verifier_ops lwt_inout_verifier_ops = {
 	.get_func_proto		= lwt_inout_func_proto,
 	.is_valid_access	= lwt_is_valid_access,
 	.convert_ctx_access	= bpf_convert_ctx_access,
+};
+
+const struct bpf_prog_ops lwt_inout_prog_ops = {
 	.test_run		= bpf_prog_test_run_skb,
 };
 
-const struct bpf_verifier_ops lwt_xmit_prog_ops = {
+const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
 	.get_func_proto		= lwt_xmit_func_proto,
 	.is_valid_access	= lwt_is_valid_access,
 	.convert_ctx_access	= bpf_convert_ctx_access,
 	.gen_prologue		= tc_cls_act_prologue,
+};
+
+const struct bpf_prog_ops lwt_xmit_prog_ops = {
 	.test_run		= bpf_prog_test_run_skb,
 };
 
-const struct bpf_verifier_ops cg_sock_prog_ops = {
+const struct bpf_verifier_ops cg_sock_verifier_ops = {
 	.get_func_proto		= sock_filter_func_proto,
 	.is_valid_access	= sock_filter_is_valid_access,
 	.convert_ctx_access	= sock_filter_convert_ctx_access,
 };
 
-const struct bpf_verifier_ops sock_ops_prog_ops = {
+const struct bpf_prog_ops cg_sock_prog_ops = {
+};
+
+const struct bpf_verifier_ops sock_ops_verifier_ops = {
 	.get_func_proto		= sock_ops_func_proto,
 	.is_valid_access	= sock_ops_is_valid_access,
 	.convert_ctx_access	= sock_ops_convert_ctx_access,
 };
 
-const struct bpf_verifier_ops sk_skb_prog_ops = {
+const struct bpf_prog_ops sock_ops_prog_ops = {
+};
+
+const struct bpf_verifier_ops sk_skb_verifier_ops = {
 	.get_func_proto		= sk_skb_func_proto,
 	.is_valid_access	= sk_skb_is_valid_access,
 	.convert_ctx_access	= bpf_convert_ctx_access,
 	.gen_prologue		= sk_skb_prologue,
 };
 
+const struct bpf_prog_ops sk_skb_prog_ops = {
+};
+
 int sk_detach_filter(struct sock *sk)
 {
 	int ret = -ENOENT;
......