提交 32bbe007 编写于 作者: A Alexei Starovoitov 提交者: David S. Miller

bpf: sanitize bpf tracepoint access

During BPF program loading, remember the last byte of ctx access,
and at the time of attaching the program to a tracepoint, check that
the program doesn't access bytes beyond those defined in the tracepoint fields.

This also disallows access to __dynamic_array fields, but can be
relaxed in the future.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 9940d67c
...@@ -131,6 +131,7 @@ struct bpf_prog_type_list { ...@@ -131,6 +131,7 @@ struct bpf_prog_type_list {
struct bpf_prog_aux { struct bpf_prog_aux {
atomic_t refcnt; atomic_t refcnt;
u32 used_map_cnt; u32 used_map_cnt;
u32 max_ctx_offset;
const struct bpf_verifier_ops *ops; const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps; struct bpf_map **used_maps;
struct bpf_prog *prog; struct bpf_prog *prog;
......
...@@ -569,6 +569,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type, ...@@ -569,6 +569,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type,
int is_signed, int filter_type); int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call); extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call); extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1) #define is_signed_type(type) (((type)(-1)) < (type)1)
......
...@@ -652,8 +652,12 @@ static int check_ctx_access(struct verifier_env *env, int off, int size, ...@@ -652,8 +652,12 @@ static int check_ctx_access(struct verifier_env *env, int off, int size,
enum bpf_access_type t) enum bpf_access_type t)
{ {
if (env->prog->aux->ops->is_valid_access && if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t)) env->prog->aux->ops->is_valid_access(off, size, t)) {
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
return 0; return 0;
}
verbose("invalid bpf_context access off=%d size=%d\n", off, size); verbose("invalid bpf_context access off=%d size=%d\n", off, size);
return -EACCES; return -EACCES;
......
...@@ -7133,6 +7133,14 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) ...@@ -7133,6 +7133,14 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
return -EINVAL; return -EINVAL;
} }
if (is_tracepoint) {
int off = trace_event_get_offsets(event->tp_event);
if (prog->aux->max_ctx_offset > off) {
bpf_prog_put(prog);
return -EACCES;
}
}
event->tp_event->prog = prog; event->tp_event->prog = prog;
return 0; return 0;
......
...@@ -204,6 +204,24 @@ static void trace_destroy_fields(struct trace_event_call *call) ...@@ -204,6 +204,24 @@ static void trace_destroy_fields(struct trace_event_call *call)
} }
} }
/*
 * Run-time counterpart of the generated trace_event_get_offsets_<call>():
 * reports the end offset of the last fixed-size trace field, i.e. the
 * number of ctx bytes a BPF program may legally read. Bytes belonging to
 * __dynamic_array payloads are deliberately excluded.
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct list_head *fields = trace_get_fields(call);
	struct ftrace_event_field *last_field;

	/*
	 * trace_define_field() prepends each new field, so the first list
	 * entry is the most recently defined field — the one with the
	 * largest offset.
	 */
	last_field = list_first_entry(fields, struct ftrace_event_field, link);

	return last_field->offset + last_field->size;
}
int trace_event_raw_init(struct trace_event_call *call) int trace_event_raw_init(struct trace_event_call *call)
{ {
int id; int id;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册