Commit 571b8739 authored by Alexei Starovoitov

Merge branch 'Follow ups for kptr series'

Kumar Kartikeya Dwivedi says:

====================

Fix a build time warning, and address comments from Alexei on the merged
version [0].

  [0]: https://lore.kernel.org/bpf/20220424214901.2743946-1-memxor@gmail.com

Changelog:
----------
v1 -> v2
v1: https://lore.kernel.org/bpf/20220510211727.575686-1-memxor@gmail.com

 * Add Fixes tag to patch 1
 * Fix test_progs-noalu32 failure in CI due to different alloc_insn (Alexei)
 * Remove per-CPU struct, use global struct (Alexei)
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
......@@ -2246,6 +2246,7 @@ extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_kptr_xchg_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
......
......@@ -564,31 +564,36 @@ struct prog_test_ref_kfunc {
int b;
struct prog_test_member memb;
struct prog_test_ref_kfunc *next;
refcount_t cnt;
};
/* Single global test object handed out by the acquire/kptr_get kfuncs.
 * ->cnt tracks outstanding references (starts at 1 for the static object
 * itself); ->next points back at the object so BPF programs can reach it
 * again via a pointer walk.
 */
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};
noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
/* randomly return NULL */
if (get_jiffies_64() % 2)
return NULL;
refcount_inc(&prog_test_struct.cnt);
return &prog_test_struct;
}
/* Acquire kfunc for struct prog_test_member: this type must never be
 * acquired directly, so warn and fail the acquisition unconditionally.
 *
 * Fix: the flattened diff left the stale "return &prog_test_struct.memb;"
 * line in place, which made the WARN_ON_ONCE()/return NULL unreachable.
 */
noinline struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}
/* Release kfunc: drop one reference on p. NULL is a no-op so programs
 * may call this on a possibly-NULL pointer.
 */
noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	if (!p)
		return;
	refcount_dec(&p->cnt);
}
noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
......@@ -597,12 +602,18 @@ noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
/* Release kfunc for struct prog_test_member1: the verifier should never
 * allow a program to reach this, so warn if it is ever called.
 */
noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}
noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b)
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
return &prog_test_struct;
struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
if (!p)
return NULL;
refcount_inc(&p->cnt);
return p;
}
struct prog_test_pass1 {
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "map_kptr.skel.h"
#include "map_kptr_fail.skel.h"
/* Buffer large enough to capture the kernel verifier log for the
 * negative tests below.
 *
 * Fix: dropped the stray "void test_map_kptr(void)" residue line that
 * preceded this declaration (removed-line leftover from the flattened
 * diff; the real test_map_kptr() entry point is defined further down) —
 * a bodyless function header here is a syntax error.
 */
static char log_buf[1024 * 1024];

/* Programs that the verifier must reject, each paired with a substring
 * expected to appear in the verifier log.
 */
struct {
	const char *prog_name;
	const char *err_msg;
} map_kptr_fail_tests[] = {
	{ "size_not_bpf_dw", "kptr access size must be BPF_DW" },
	{ "non_const_var_off", "kptr access cannot have variable offset" },
	{ "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. kptr has to be" },
	{ "misaligned_access_write", "kptr access misaligned expected=8 off=7" },
	{ "misaligned_access_read", "kptr access misaligned expected=8 off=1" },
	{ "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" },
	{ "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" },
	{ "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
	{ "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" },
	{ "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" },
	{ "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" },
	{ "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" },
	{ "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" },
	{ "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" },
	{ "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" },
	{ "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" },
	{ "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
	{ "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" },
	{ "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" },
	{ "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" },
	{ "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" },
	{ "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" },
	{ "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" },
	{ "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" },
	{ "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" },
};
/* Enable autoload for a single program in the map_kptr_fail skeleton,
 * verify that loading it fails, and that the verifier log contains the
 * expected error substring; dump the log on mismatch.
 */
static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
		    .kernel_log_size = sizeof(log_buf),
		    .kernel_log_level = 1);
	struct bpf_program *prog;
	struct map_kptr_fail *skel;
	int err;

	skel = map_kptr_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;
	bpf_program__set_autoload(prog, true);

	/* Load must be rejected by the verifier. */
	err = map_kptr_fail__load(skel);
	if (!ASSERT_ERR(err, "map_kptr__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	map_kptr_fail__destroy(skel);
}
/* Run every entry of map_kptr_fail_tests as its own subtest. */
static void test_map_kptr_fail(void)
{
	int tc;

	for (tc = 0; tc < ARRAY_SIZE(map_kptr_fail_tests); tc++) {
		if (!test__start_subtest(map_kptr_fail_tests[tc].prog_name))
			continue;
		test_map_kptr_fail_prog(map_kptr_fail_tests[tc].prog_name,
					map_kptr_fail_tests[tc].err_msg);
	}
}
static void test_map_kptr_success(bool test_run)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct map_kptr *skel;
int key = 0, ret;
char buf[24];
......@@ -13,6 +97,16 @@ void test_map_kptr(void)
if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts);
ASSERT_OK(ret, "test_map_kptr_ref refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
if (test_run)
return;
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
ASSERT_OK(ret, "array_map update");
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
......@@ -35,3 +129,15 @@ void test_map_kptr(void)
map_kptr__destroy(skel);
}
/* Top-level entry: run the success programs twice — the first pass
 * leaves a referenced kptr in the map, and the second pass verifies the
 * refcount returns to 1 — then run the negative verifier tests.
 */
void test_map_kptr(void)
{
	if (test__start_subtest("success")) {
		test_map_kptr_success(false);
		/* Second test_run pass observes refcount going back to 1
		 * after the kptr left in the map by the first pass.
		 */
		test_map_kptr_success(true);
	}
	test_map_kptr_fail();
}
......@@ -141,7 +141,7 @@ SEC("tc")
int test_map_kptr(struct __sk_buff *ctx)
{
struct map_value *v;
int i, key = 0;
int key = 0;
#define TEST(map) \
v = bpf_map_lookup_elem(&map, &key); \
......@@ -162,7 +162,7 @@ SEC("tc")
int test_map_in_map_kptr(struct __sk_buff *ctx)
{
struct map_value *v;
int i, key = 0;
int key = 0;
void *map;
#define TEST(map_in_map) \
......@@ -187,4 +187,106 @@ int test_map_in_map_kptr(struct __sk_buff *ctx)
return 0;
}
/* Exercise the referenced-kptr lifecycle: acquire, store via
 * bpf_kptr_xchg, re-acquire via kptr_get, and release, checking the
 * object's refcount (cnt.refs.counter) at every step. Non-zero return
 * values identify which step failed; 0 means all checks passed and a
 * reference was deliberately left in the map for test_map_kptr_ref2.
 */
SEC("tc")
int test_map_kptr_ref(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p, *p_st;
	unsigned long arg = 0;
	struct map_value *v;
	int key = 0, ret;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 1;

	/* p->next points back at the object itself, so p_st stays a valid
	 * (untrusted) handle for reading the refcount after p is moved.
	 */
	p_st = p->next;
	if (p_st->cnt.refs.counter != 2) {
		ret = 2;
		goto end;
	}

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v) {
		ret = 3;
		goto end;
	}

	/* Store our reference in the map; the slot was empty, so the
	 * exchange must return NULL and the refcount must be unchanged.
	 */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 4;
		goto end;
	}
	if (p_st->cnt.refs.counter != 2)
		return 5;

	/* kptr_get takes an extra reference on the stored object. */
	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
	if (!p)
		return 6;
	if (p_st->cnt.refs.counter != 3) {
		ret = 7;
		goto end;
	}
	bpf_kfunc_call_test_release(p);
	if (p_st->cnt.refs.counter != 2)
		return 8;

	/* Take the reference back out of the map and release it. */
	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 9;
	bpf_kfunc_call_test_release(p);
	if (p_st->cnt.refs.counter != 1)
		return 10;

	/* Re-acquire and store again, leaving the map populated. */
	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 11;
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 12;
		goto end;
	}
	if (p_st->cnt.refs.counter != 2)
		return 13;
	/* Leave in map */

	return 0;
end:
	bpf_kfunc_call_test_release(p);
	return ret;
}
/* Second pass over the map populated by test_map_kptr_ref: verify the
 * stored kptr still holds one map reference (counter == 2: static init
 * plus the map slot), move the reference out and back in via
 * bpf_kptr_xchg, and confirm the count never changes. Non-zero return
 * values identify the failing step.
 */
SEC("tc")
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p, *p_st;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 1;

	p_st = v->ref_ptr;
	if (!p_st || p_st->cnt.refs.counter != 2)
		return 2;

	/* Take ownership of the stored reference. */
	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 3;
	if (p_st->cnt.refs.counter != 2) {
		bpf_kfunc_call_test_release(p);
		return 4;
	}

	/* Put it back; the slot is empty, so NULL must come out. */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		bpf_kfunc_call_test_release(p);
		return 5;
	}
	if (p_st->cnt.refs.counter != 2)
		return 6;
	return 0;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
/* Map value layout used by all fail tests: an 8-byte scratch buffer at
 * offset 0, an unreferenced kptr at offset 8, and two referenced kptrs
 * (offsets appear in several expected verifier error strings).
 */
struct map_value {
	char buf[8];
	struct prog_test_ref_kfunc __kptr *unref_ptr;
	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
	struct prog_test_member __kptr_ref *ref_memb_ptr;
};

/* Single-entry array map holding one struct map_value. */
struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} array_map SEC(".maps");

/* Kernel test kfuncs, resolved via BTF at load time. */
extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
/* Each program below is intentionally invalid and must be rejected by
 * the verifier; the expected log substring is noted above each one
 * (matching the map_kptr_fail_tests table in the userspace test).
 */

/* Expect: "kptr access size must be BPF_DW" — 32-bit store to a kptr. */
SEC("?tc")
int size_not_bpf_dw(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	*(u32 *)&v->unref_ptr = 0;
	return 0;
}

/* Expect: "kptr access cannot have variable offset" — store at a
 * ctx-derived (variable) offset into the map value.
 */
SEC("?tc")
int non_const_var_off(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, id;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	id = ctx->protocol;
	if (id < 4 || id > 12)
		return 0;
	*(u64 *)((void *)v + id) = 0;
	return 0;
}

/* Expect: "R1 doesn't have constant offset. kptr has to be" —
 * bpf_kptr_xchg on a variable-offset pointer.
 */
SEC("?tc")
int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, id;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	id = ctx->protocol;
	if (id < 4 || id > 12)
		return 0;
	bpf_kptr_xchg((void *)v + id, NULL);
	return 0;
}

/* Expect: "kptr access misaligned expected=8 off=7" — pointer-sized
 * store at a misaligned offset.
 */
SEC("?tc")
int misaligned_access_write(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	*(void **)((void *)v + 7) = NULL;
	return 0;
}

/* Expect: "kptr access misaligned expected=8 off=1" — misaligned load. */
SEC("?tc")
int misaligned_access_read(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	return *(u64 *)((void *)v + 1);
}

/* Expect: "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" —
 * storing a pointer that was adjusted by a variable amount.
 */
SEC("?tc")
int reject_var_off_store(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *unref_ptr;
	struct map_value *v;
	int key = 0, id;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	unref_ptr = v->unref_ptr;
	if (!unref_ptr)
		return 0;

	id = ctx->protocol;
	if (id < 4 || id > 12)
		return 0;
	unref_ptr += id;
	v->unref_ptr = unref_ptr;
	return 0;
}
/* Expect: "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc"
 * — storing a pointer offset by +4, so the type no longer matches.
 */
SEC("?tc")
int reject_bad_type_match(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *unref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	unref_ptr = v->unref_ptr;
	if (!unref_ptr)
		return 0;

	unref_ptr = (void *)unref_ptr + 4;
	v->unref_ptr = unref_ptr;
	return 0;
}

/* Expect: "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" —
 * loaded unref kptr may be NULL/untrusted, unusable with this helper.
 */
SEC("?tc")
int marked_as_untrusted_or_null(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_this_cpu_ptr(v->unref_ptr);
	return 0;
}

/* Expect: "access beyond struct prog_test_ref_kfunc at off 32 size 4" —
 * read one past the end of the pointed-to struct.
 */
SEC("?tc")
int correct_btf_id_check_size(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = v->unref_ptr;
	if (!p)
		return 0;

	return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
}

/* Expect: "R1 type=untrusted_ptr_ expected=percpu_ptr_" — walking
 * ->next from an untrusted pointer keeps it untrusted.
 */
SEC("?tc")
int inherit_untrusted_on_walk(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *unref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	unref_ptr = v->unref_ptr;
	if (!unref_ptr)
		return 0;

	unref_ptr = unref_ptr->next;
	bpf_this_cpu_ptr(unref_ptr);
	return 0;
}

/* Expect: "off=8 kptr isn't referenced kptr" — xchg needs __kptr_ref. */
SEC("?tc")
int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_kptr_xchg(&v->unref_ptr, NULL);
	return 0;
}

/* Expect: "arg#0 expected pointer to map value" — ctx pointer passed. */
SEC("?tc")
int reject_kptr_get_no_map_val(struct __sk_buff *ctx)
{
	bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0);
	return 0;
}

/* Expect: "arg#0 expected pointer to map value" — lookup result not
 * NULL-checked before use.
 */
SEC("?tc")
int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx)
{
	bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0);
	return 0;
}

/* Expect: "arg#0 no referenced kptr at map value offset=0" — offset 0
 * is the buf field, not a kptr slot.
 */
SEC("?tc")
int reject_kptr_get_no_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_kfunc_call_test_kptr_get((void *)v, 0, 0);
	return 0;
}

/* Expect: "arg#0 no referenced kptr at map value offset=8" — offset 8
 * holds the unreferenced kptr, which kptr_get cannot use.
 */
SEC("?tc")
int reject_kptr_get_on_unref(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0);
	return 0;
}

/* Expect: "kernel function bpf_kfunc_call_test_kptr_get args#0" —
 * ref_memb_ptr's type does not match the kfunc's expected type.
 */
SEC("?tc")
int reject_kptr_get_bad_type_match(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0);
	return 0;
}
/* Expect: "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" — a
 * plain load of a referenced kptr yields an untrusted, maybe-NULL ptr.
 */
SEC("?tc")
int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_this_cpu_ptr(v->ref_ptr);
	return 0;
}

/* Expect: "store to referenced kptr disallowed" — only bpf_kptr_xchg
 * may write a referenced kptr slot, never a direct store.
 */
SEC("?tc")
int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = v->ref_ptr;
	if (!p)
		return 0;
	/* Checkmate, clang */
	*(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
	return 0;
}

/* Expect: "R2 type=untrusted_ptr_ expected=ptr_" — an untrusted
 * pointer read from the map cannot be stored back via xchg.
 */
SEC("?tc")
int reject_untrusted_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = v->ref_ptr;
	if (!p)
		return 0;
	bpf_kptr_xchg(&v->ref_ptr, p);
	return 0;
}

/* Expect: "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc
 * expected=ptr_prog_test_member" — wrong type for the target slot.
 */
SEC("?tc")
int reject_bad_type_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *ref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!ref_ptr)
		return 0;

	bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
	return 0;
}

/* Expect: "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" —
 * storing a member of a referenced object, not the object itself.
 */
SEC("?tc")
int reject_member_of_ref_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *ref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!ref_ptr)
		return 0;

	bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
	return 0;
}

/* Expect: "kptr cannot be accessed indirectly by helper" — the write
 * range (sizeof(v->buf) + 1) overlaps the kptr at offset 8.
 */
SEC("?syscall")
int reject_indirect_helper_access(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_get_current_comm(v, sizeof(v->buf) + 1);
	return 0;
}

/* Global (non-static) helper: writes 42 through p when non-NULL. */
__noinline
int write_func(int *p)
{
	return p ? *p = 42 : 0;
}

/* Expect: "kptr cannot be accessed indirectly by helper" — a global
 * function writing into the map value can touch the kptr slot.
 */
SEC("?tc")
int reject_indirect_global_func_access(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	return write_func((void *)v + 5);
}

/* Expect: "Unreleased reference id=5 alloc_insn=" — after a successful
 * xchg the returned (old) pointer may hold a reference that this
 * program never releases.
 */
SEC("?tc")
int kptr_xchg_ref_state(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	bpf_kptr_xchg(&v->ref_ptr, p);
	return 0;
}

/* Expect: "Unreleased reference id=3 alloc_insn=" — kptr_get's result
 * is an acquired reference that is never released.
 */
SEC("?tc")
int kptr_get_ref_state(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
......@@ -212,13 +212,13 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 24),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "access beyond struct prog_test_ref_kfunc at off 24 size 8",
.errstr = "access beyond struct prog_test_ref_kfunc at off 32 size 8",
},
{
"map_kptr: unref: inherit PTR_UNTRUSTED on struct walk",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册