Commit 86dfa985 authored by Alexei Starovoitov, committed by Zheng Zengkai

bpf: Compute program stats for sleepable programs

mainline inclusion
from mainline-5.12-rc1
commit f2dd3b39
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5EUVD
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f2dd3b39467411c53703125a111f45b3672c1771

-------------------------------------------------

Since sleepable programs don't migrate from the CPU, the execution stats can be
computed for them as well. Reuse the same infrastructure for both sleepable and
non-sleepable programs.

run_cnt     -> the number of times the program was executed.
run_time_ns -> the program execution time in nanoseconds including the
               off-cpu time when the program was sleeping.
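
Both counters are exported to user space through struct bpf_prog_info (and
shown by bpftool prog show). As an illustration only, not part of this commit,
user space could read them roughly as in the sketch below; prog_fd is assumed
to be a valid fd for an already-loaded program:

	/* Sketch: read run_cnt/run_time_ns of a loaded BPF program.
	 * Assumes prog_fd was obtained elsewhere (e.g. bpf_obj_get()).
	 */
	#include <linux/bpf.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int print_prog_stats(int prog_fd)
	{
		struct bpf_prog_info info = {};
		union bpf_attr attr = {};

		attr.info.bpf_fd = prog_fd;
		attr.info.info_len = sizeof(info);
		attr.info.info = (__u64)(unsigned long)&info;
		if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
			return -1;
		/* for sleepable programs run_time_ns includes off-cpu time */
		printf("run_cnt=%llu run_time_ns=%llu\n",
		       info.run_cnt, info.run_time_ns);
		return 0;
	}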
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: KP Singh <kpsingh@kernel.org>
Link: https://lore.kernel.org/bpf/20210210033634.62081-4-alexei.starovoitov@gmail.com
(cherry picked from commit f2dd3b39)
Signed-off-by: Wang Yufen <wangyufen@huawei.com>

Conflicts:
	include/linux/bpf.h
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
Parent c82a344b
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1735,15 +1735,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	u8 *prog = *pprog;
 	int cnt = 0;
 
-	if (p->aux->sleepable) {
-		if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
-			return -EINVAL;
-	} else {
-		if (emit_call(&prog, __bpf_prog_enter, prog))
-			return -EINVAL;
-		/* remember prog start time returned by __bpf_prog_enter */
-		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
-	}
+	if (emit_call(&prog,
+		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
+		      __bpf_prog_enter, prog))
+		return -EINVAL;
+	/* remember prog start time returned by __bpf_prog_enter */
+	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
 
 	/* arg1: lea rdi, [rbp - stack_size] */
 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
@@ -1767,18 +1764,14 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	if (save_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
-	if (p->aux->sleepable) {
-		if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
-			return -EINVAL;
-	} else {
-		/* arg1: mov rdi, progs[i] */
-		emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
-			       (u32) (long) p);
-		/* arg2: mov rsi, rbx <- start time in nsec */
-		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
-		if (emit_call(&prog, __bpf_prog_exit, prog))
-			return -EINVAL;
-	}
+	/* arg1: mov rdi, progs[i] */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+	/* arg2: mov rsi, rbx <- start time in nsec */
+	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+	if (emit_call(&prog,
+		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
+		      __bpf_prog_exit, prog))
+		return -EINVAL;
 
 	*pprog = prog;
 	return 0;
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -633,8 +633,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
 /* these two functions are called from generated trampoline */
 u64 notrace __bpf_prog_enter(void);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-void notrace __bpf_prog_enter_sleepable(void);
-void notrace __bpf_prog_exit_sleepable(void);
+u64 notrace __bpf_prog_enter_sleepable(void);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
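
With the unified prototypes above, both variants return the start timestamp
and take it back at exit. Conceptually, the sequence that invoke_bpf_prog()
emits behaves like the following C sketch (illustrative only; the real
trampoline is generated x86 code, with start kept in rbx and args living in
the trampoline stack frame):

	/* Illustrative C equivalent of the emitted enter/run/exit sequence */
	static u64 run_one_prog(struct bpf_prog *p, void *args)
	{
		u64 start, ret;

		start = p->aux->sleepable ? __bpf_prog_enter_sleepable()
					  : __bpf_prog_enter();
		ret = p->bpf_func(args, NULL);	/* the BPF program itself */
		if (p->aux->sleepable)
			__bpf_prog_exit_sleepable(p, start);
		else
			__bpf_prog_exit(p, start);
		return ret;
	}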
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -490,56 +490,70 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 	mutex_unlock(&trampoline_mutex);
 }
 
+#define NO_START_TIME 0
+static u64 notrace bpf_prog_start_time(void)
+{
+	u64 start = NO_START_TIME;
+
+	if (static_branch_unlikely(&bpf_stats_enabled_key))
+		start = sched_clock();
+	return start;
+}
+
 /* The logic is similar to BPF_PROG_RUN, but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
- * call _bpf_prog_enter
+ * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
 u64 notrace __bpf_prog_enter(void)
	__acquires(RCU)
 {
-	u64 start = 0;
-
 	rcu_read_lock();
 	migrate_disable();
-	if (static_branch_unlikely(&bpf_stats_enabled_key))
-		start = sched_clock();
-	return start;
+	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
-	__releases(RCU)
+static void notrace update_prog_stats(struct bpf_prog *prog,
+				      u64 start)
 {
 	struct bpf_prog_stats *stats;
 
 	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
-	    /* static_key could be enabled in __bpf_prog_enter
-	     * and disabled in __bpf_prog_exit.
+	    /* static_key could be enabled in __bpf_prog_enter*
+	     * and disabled in __bpf_prog_exit*.
 	     * And vice versa.
-	     * Hence check that 'start' is not zero.
+	     * Hence check that 'start' is valid.
 	     */
-	    start) {
+	    start > NO_START_TIME) {
 		stats = this_cpu_ptr(prog->stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->cnt++;
 		stats->nsecs += sched_clock() - start;
 		u64_stats_update_end(&stats->syncp);
 	}
+}
+
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
+	__releases(RCU)
+{
+	update_prog_stats(prog, start);
 	migrate_enable();
 	rcu_read_unlock();
 }
 
-void notrace __bpf_prog_enter_sleepable(void)
+u64 notrace __bpf_prog_enter_sleepable(void)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
+	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_sleepable(void)
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
 {
+	update_prog_stats(prog, start);
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
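
For completeness (not part of this diff): bpf_stats_enabled_key is off by
default, so bpf_prog_start_time() returns NO_START_TIME and update_prog_stats()
is a no-op. Collection can be switched on system-wide with
sysctl -w kernel.bpf_stats_enabled=1, or from a program via the
BPF_ENABLE_STATS command; a minimal sketch using libbpf's bpf_enable_stats()
(available on kernels 5.8+) could look like:

	#include <bpf/bpf.h>
	#include <stdio.h>

	int main(void)
	{
		/* Stats stay enabled while this fd is open; closing it
		 * (or process exit) turns collection back off.
		 */
		int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);

		if (stats_fd < 0) {
			perror("bpf_enable_stats");
			return 1;
		}
		getchar();	/* keep stats enabled until a key is pressed */
		return 0;
	}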