Commit fd7dc3d1 authored by Lorenz Bauer, committed by Zheng Zengkai

bpf: Consolidate shared test timing code

mainline inclusion
from mainline-5.13-rc1
commit 607b9cc9
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5EUVD
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=607b9cc92bd7208338d714a22b8082fe83bcb177

-------------------------------------------------

Share the timing / signal interruption logic between different
implementations of PROG_TEST_RUN. There is a change in behaviour
as well. We check the loop exit condition before checking for
pending signals. This resolves an edge case where a signal
arrives during the last iteration. Instead of aborting with
EINTR, we return the successful result to user space.
Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210303101816.36774-2-lmb@cloudflare.com
(cherry picked from commit 607b9cc9)
Signed-off-by: Wang Yufen <wangyufen@huawei.com>

Conflicts:
	net/bpf/test_run.c
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
Parent 03ebaa52
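
The diff below replaces the two open-coded timing loops with one shared helper triple: bpf_test_timer_enter() takes rcu_read_lock() plus preempt_disable() or migrate_disable() and starts the clock, bpf_test_timer_continue() decides after each run whether to iterate again (checking the loop exit condition before pending signals, which is the behaviour change noted above), and bpf_test_timer_leave() undoes the pinning. A minimal sketch of the resulting caller pattern, with run_one_test() as a hypothetical stand-in for the per-program-type run step:

	struct bpf_test_timer t = { NO_MIGRATE };	/* or NO_PREEMPT */
	u32 duration = 0;
	int err;

	bpf_test_timer_enter(&t);	/* pin the task, start the clock */
	do {
		run_one_test();		/* hypothetical: one BPF program invocation */
	} while (bpf_test_timer_continue(&t, repeat, &err, &duration));
	bpf_test_timer_leave(&t);	/* stop the clock, unpin */

	/* err == 0: duration holds the averaged per-run time in ns;
	 * err == -EINTR: a signal arrived on a non-final iteration. */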
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -16,16 +16,80 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
 
+struct bpf_test_timer {
+	enum { NO_PREEMPT, NO_MIGRATE } mode;
+	u32 i;
+	u64 time_start, time_spent;
+};
+
+static void bpf_test_timer_enter(struct bpf_test_timer *t)
+	__acquires(rcu)
+{
+	rcu_read_lock();
+	if (t->mode == NO_PREEMPT)
+		preempt_disable();
+	else
+		migrate_disable();
+
+	t->time_start = ktime_get_ns();
+}
+
+static void bpf_test_timer_leave(struct bpf_test_timer *t)
+	__releases(rcu)
+{
+	t->time_start = 0;
+
+	if (t->mode == NO_PREEMPT)
+		preempt_enable();
+	else
+		migrate_enable();
+
+	rcu_read_unlock();
+}
+
+static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
+	__must_hold(rcu)
+{
+	t->i++;
+	if (t->i >= repeat) {
+		/* We're done. */
+		t->time_spent += ktime_get_ns() - t->time_start;
+		do_div(t->time_spent, t->i);
+		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
+		*err = 0;
+		goto reset;
+	}
+
+	if (signal_pending(current)) {
+		/* During iteration: we've been cancelled, abort. */
+		*err = -EINTR;
+		goto reset;
+	}
+
+	if (need_resched()) {
+		/* During iteration: we need to reschedule between runs. */
+		t->time_spent += ktime_get_ns() - t->time_start;
+		bpf_test_timer_leave(t);
+		cond_resched();
+		bpf_test_timer_enter(t);
+	}
+
+	/* Do another round. */
+	return true;
+
+reset:
+	t->i = 0;
+	return false;
+}
+
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 			u32 *retval, u32 *time, bool xdp)
 {
 	struct bpf_prog_array_item item = {.prog = prog};
 	struct bpf_run_ctx *old_ctx;
 	struct bpf_cg_run_ctx run_ctx;
+	struct bpf_test_timer t = { NO_MIGRATE };
 	enum bpf_cgroup_storage_type stype;
-	u64 time_start, time_spent = 0;
-	int ret = 0;
-	u32 i;
+	int ret;
 
 	for_each_cgroup_storage_type(stype) {
 		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -40,42 +104,17 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 	if (!repeat)
 		repeat = 1;
 
-	rcu_read_lock();
-	migrate_disable();
-	time_start = ktime_get_ns();
+	bpf_test_timer_enter(&t);
 	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
-	for (i = 0; i < repeat; i++) {
+	do {
 		run_ctx.prog_item = &item;
 		if (xdp)
 			*retval = bpf_prog_run_xdp(prog, ctx);
 		else
 			*retval = BPF_PROG_RUN(prog, ctx);
-
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		if (need_resched()) {
-			time_spent += ktime_get_ns() - time_start;
-			migrate_enable();
-			rcu_read_unlock();
-
-			cond_resched();
-
-			rcu_read_lock();
-			migrate_disable();
-			time_start = ktime_get_ns();
-		}
-	}
+	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
 	bpf_reset_run_ctx(old_ctx);
-
-	time_spent += ktime_get_ns() - time_start;
-	migrate_enable();
-	rcu_read_unlock();
-
-	do_div(time_spent, repeat);
-	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+	bpf_test_timer_leave(&t);
 
 	for_each_cgroup_storage_type(stype)
 		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
@@ -691,18 +730,17 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 				     const union bpf_attr *kattr,
 				     union bpf_attr __user *uattr)
 {
+	struct bpf_test_timer t = { NO_PREEMPT };
 	u32 size = kattr->test.data_size_in;
 	struct bpf_flow_dissector ctx = {};
 	u32 repeat = kattr->test.repeat;
 	struct bpf_flow_keys *user_ctx;
 	struct bpf_flow_keys flow_keys;
-	u64 time_start, time_spent = 0;
 	const struct ethhdr *eth;
 	unsigned int flags = 0;
 	u32 retval, duration;
 	void *data;
 	int ret;
-	u32 i;
 
 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
 		return -EINVAL;
@@ -738,39 +776,15 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	ctx.data = data;
 	ctx.data_end = (__u8 *)data + size;
 
-	rcu_read_lock();
-	preempt_disable();
-	time_start = ktime_get_ns();
-	for (i = 0; i < repeat; i++) {
+	bpf_test_timer_enter(&t);
+	do {
 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
 					  size, flags);
+	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+	bpf_test_timer_leave(&t);
 
-		if (signal_pending(current)) {
-			preempt_enable();
-			rcu_read_unlock();
-
-			ret = -EINTR;
-			goto out;
-		}
-
-		if (need_resched()) {
-			time_spent += ktime_get_ns() - time_start;
-			preempt_enable();
-			rcu_read_unlock();
-
-			cond_resched();
-
-			rcu_read_lock();
-			preempt_disable();
-			time_start = ktime_get_ns();
-		}
-	}
-
-	time_spent += ktime_get_ns() - time_start;
-	preempt_enable();
-	rcu_read_unlock();
-
-	do_div(time_spent, repeat);
-	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+	if (ret < 0)
+		goto out;
 
 	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
 			      retval, duration);
...
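
The averaged duration computed by bpf_test_timer_continue() is what BPF_PROG_TEST_RUN reports back to user space: with repeat = 1000 and 2.5 ms spent in total, duration comes back as 2500 ns, clamped at U32_MAX. A sketch of exercising this path via libbpf (bpf_prog_test_run_opts() and LIBBPF_OPTS() are from current libbpf, newer than this patch; prog_fd and pkt are assumed to be an already-loaded program and valid input for its type, e.g. an Ethernet frame for a flow dissector):

	#include <stdio.h>
	#include <bpf/bpf.h>

	static int time_prog(int prog_fd, void *pkt, __u32 pkt_len)
	{
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.data_in = pkt,
			.data_size_in = pkt_len,
			.repeat = 1000,	/* kernel iterates via bpf_test_timer_continue() */
		);
		int err = bpf_prog_test_run_opts(prog_fd, &opts);

		if (err)
			return err;	/* -EINTR if a signal landed on a non-final iteration */

		printf("retval=%u avg=%u ns\n", opts.retval, opts.duration);
		return 0;
	}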