Commit 148709bc authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2019-12-11

The following pull-request contains BPF updates for your *net* tree.

We've added 8 non-merge commits during the last 1 day(s) which contain
a total of 10 files changed, 126 insertions(+), 18 deletions(-).

The main changes are:

1) Make BPF trampoline co-exist with ftrace-based tracers, from Alexei.

2) Fix build in minimal configurations, from Arnd.

3) Fix the mips and riscv JITs' bpf_tail_call limit (allow 33 tail calls,
   matching other JITs), from Paul.

4) Fix bpftool segfault, from Toke.

5) Fix samples/bpf syscall tracing and kernel symbol matching, from Daniel.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
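
Item (1) is the centerpiece: fentry/fexit BPF programs attach through a BPF
trampoline patched into a kernel function's entry, which previously fought
with ftrace over the same instruction. A minimal sketch of such a program,
written against today's libbpf header layout (bpf_fentry_test1() is the
kernel test function that the selftest at the end of this series exercises):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Runs on entry of the kernel's bpf_fentry_test1() test function.
 * With this series applied, attaching it coexists with function_graph,
 * kprobes and other ftrace users on the same function. */
SEC("fentry/bpf_fentry_test1")
int test1(unsigned long long *ctx)
{
	return 0;
}

char _license[] SEC("license") = "GPL";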
@@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 {
 	int off, b_off;
+	int tcc_reg;
 
 	ctx->flags |= EBPF_SEEN_TC;
 	/*
@@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 	b_off = b_imm(this_idx + 1, ctx);
 	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
 	/*
-	 * if (--TCC < 0)
+	 * if (TCC-- < 0)
 	 *     goto out;
 	 */
 	/* Delay slot */
-	emit_instr(ctx, daddiu, MIPS_R_T5,
-		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
 	b_off = b_imm(this_idx + 1, ctx);
-	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+	emit_instr(ctx, bltz, tcc_reg, b_off);
 	/*
 	 * prog = array->ptrs[index];
 	 * if (prog == NULL)
@@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 		return -1;
 	emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
 
-	/* if (--TCC < 0)
+	/* if (TCC-- < 0)
 	 *     goto out;
 	 */
 	emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
 	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
 	if (is_13b_check(off, insn))
 		return -1;
-	emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
+	emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
 
 	/* prog = array->ptrs[index];
 	 * if (!prog)
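
Both JIT hunks fix the same off-by-one: the branch used to test the
already-decremented counter (--TCC < 0), which permitted only 32 tail calls,
while the x86 JIT and the interpreter permit 33. Testing the pre-decrement
value (TCC-- < 0) aligns mips and riscv with the other JITs. A minimal
user-space sketch of the difference (illustrative only, not kernel code):

/* Counter semantics sketch: TCC starts at MAX_TAIL_CALL_CNT (32). */
int tcc = 32, calls = 0;

while (!(--tcc < 0))	/* old check: body runs 32 times */
	calls++;

tcc = 32;
calls = 0;
while (!(tcc-- < 0))	/* new check: body runs 33 times, matching x86 */
	calls++;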
@@ -461,6 +461,7 @@ struct bpf_trampoline {
 	struct {
 		struct btf_func_model model;
 		void *addr;
+		bool ftrace_managed;
 	} func;
 	/* list of BPF programs using this trampoline */
 	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
@@ -3470,6 +3470,7 @@ static u8 bpf_ctx_convert_map[] = {
 	[_id] = __ctx_convert##_id,
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
+	0, /* avoid empty array */
};
 #undef BPF_MAP_TYPE
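
Arnd's one-liner matters in minimal configurations: with no BPF program
types compiled in, the bpf_types.h X-macro expands to nothing, and an empty
initializer list is not valid ISO C, so the build can break. An editorial
reduction of the problem (macro and array names are made up):

#define PROG_TYPE_ENTRIES	/* expands to nothing in a minimal config */

static unsigned char ctx_convert_map[] = {
	PROG_TYPE_ENTRIES	/* contributes no elements here */
	0,			/* dummy element keeps the array non-empty */
};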
@@ -3,6 +3,7 @@
 #include <linux/hash.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/ftrace.h>
 
 /* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
 #define TRAMPOLINE_HASH_BITS 10
@@ -59,6 +60,60 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 	return tr;
 }
 
+static int is_ftrace_location(void *ip)
+{
+	long addr;
+
+	addr = ftrace_location((long)ip);
+	if (!addr)
+		return 0;
+	if (WARN_ON_ONCE(addr != (long)ip))
+		return -EFAULT;
+	return 1;
+}
+
+static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
+{
+	void *ip = tr->func.addr;
+	int ret;
+
+	if (tr->func.ftrace_managed)
+		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+	else
+		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+	return ret;
+}
+
+static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
+{
+	void *ip = tr->func.addr;
+	int ret;
+
+	if (tr->func.ftrace_managed)
+		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
+	else
+		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
+	return ret;
+}
+
+/* first time registering */
+static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
+{
+	void *ip = tr->func.addr;
+	int ret;
+
+	ret = is_ftrace_location(ip);
+	if (ret < 0)
+		return ret;
+	tr->func.ftrace_managed = ret;
+
+	if (tr->func.ftrace_managed)
+		ret = register_ftrace_direct((long)ip, (long)new_addr);
+	else
+		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+	return ret;
+}
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86. Pick a number to fit into PAGE_SIZE / 2
  */
@@ -77,8 +132,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	int err;
 
 	if (fentry_cnt + fexit_cnt == 0) {
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
-					 old_image, NULL);
+		err = unregister_fentry(tr, old_image);
 		tr->selector = 0;
 		goto out;
 	}
@@ -105,12 +159,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	if (tr->selector)
 		/* progs already running at this address */
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
-					 old_image, new_image);
+		err = modify_fentry(tr, old_image, new_image);
 	else
 		/* first time registering */
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
-					 new_image);
+		err = register_fentry(tr, new_image);
 	if (err)
 		goto out;
 	tr->selector++;
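
The net effect of the trampoline changes: instead of always patching the
function's entry nop with bpf_arch_text_poke(), BPF first asks whether
ftrace owns that location. If it does, the trampoline is installed through
ftrace's new direct-call API, so function_graph, kprobes and other ftrace
users keep seeing the function; only locations ftrace does not track are
patched directly. For reference, the direct-call API the new helpers rely
on (prototypes as of this series; the API was reworked in later kernels):

unsigned long ftrace_location(unsigned long ip);	/* 0 if ip is not an ftrace site */
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr,
			 unsigned long new_addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);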
@@ -47,13 +47,27 @@ static __always_inline void count(void *map)
 SEC("tracepoint/syscalls/sys_enter_open")
 int trace_enter_open(struct syscalls_enter_open_args *ctx)
 {
-	count((void *)&enter_open_map);
+	count(&enter_open_map);
+	return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_openat")
+int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
+{
+	count(&enter_open_map);
 	return 0;
 }
 
 SEC("tracepoint/syscalls/sys_exit_open")
 int trace_enter_exit(struct syscalls_exit_open_args *ctx)
 {
-	count((void *)&exit_open_map);
+	count(&exit_open_map);
+	return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_openat")
+int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
+{
+	count(&exit_open_map);
 	return 0;
 }
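
The new openat handlers are the substance of this fix: modern C libraries
implement open(3) via the openat(2) syscall, so a sample counting only
sys_enter_open sees nothing on current systems. A quick way to confirm this
(illustrative user-space snippet, not part of the patch):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Run under strace: glibc's open() typically shows up as
	 *   openat(AT_FDCWD, "/dev/null", O_RDONLY) = 3
	 * so the sys_enter_open tracepoint never fires for it. */
	int fd = open("/dev/null", O_RDONLY);

	if (fd >= 0)
		close(fd);
	return 0;
}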
@@ -37,9 +37,9 @@ static void print_ksym(__u64 addr)
 	}
 
 	printf("%s;", sym->name);
-	if (!strcmp(sym->name, "sys_read"))
+	if (!strstr(sym->name, "sys_read"))
 		sys_read_seen = true;
-	else if (!strcmp(sym->name, "sys_write"))
+	else if (!strstr(sym->name, "sys_write"))
 		sys_write_seen = true;
 }
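
The switch from strcmp() to strstr() is needed because current kernels
expose arch-prefixed syscall symbols, e.g. __x64_sys_read on x86-64, so an
exact compare against "sys_read" never matches. Illustration (stand-alone
snippet, not from the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "__x64_sys_read";	/* as resolved from kallsyms */

	printf("strcmp: %d\n", strcmp(name, "sys_read") == 0);	  /* 0: miss */
	printf("strstr: %d\n", strstr(name, "sys_read") != NULL); /* 1: hit */
	return 0;
}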
@@ -493,7 +493,7 @@ static int do_dump(int argc, char **argv)
 	info = &info_linear->info;
 
 	if (mode == DUMP_JITED) {
-		if (info->jited_prog_len == 0) {
+		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 			p_info("no instructions returned");
 			goto err_free;
 		}
@@ -174,7 +174,7 @@ static const char *print_call(void *private_data,
 	struct kernel_sym *sym;
 
 	if (insn->src_reg == BPF_PSEUDO_CALL &&
-	    (__u32) insn->imm < dd->nr_jited_ksyms)
+	    (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
 		address = dd->jited_ksyms[insn->imm];
 
 	sym = kernel_syms_search(dd, address);
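
Both bpftool hunks guard against prog info whose length or count field is
non-zero while the corresponding data pointer is NULL, which can happen when
the kernel withholds JITed addresses (for example from unprivileged callers
or under the kptr_restrict sysctl); bpftool used to dereference the NULL
pointer and segfault. A sketch of the two-call bpf_obj_get_info_by_fd()
pattern this code builds on, with the extra pointer check (error handling
trimmed; prog_fd is assumed to be a valid program fd):

#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>

static void *fetch_jited_insns(int prog_fd, __u32 *len)
{
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);
	void *buf;

	/* first call: learn the required buffer size */
	memset(&info, 0, sizeof(info));
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len) ||
	    info.jited_prog_len == 0)
		return NULL;

	buf = malloc(info.jited_prog_len);
	if (!buf)
		return NULL;
	*len = info.jited_prog_len;

	/* second call: fill the buffer */
	memset(&info, 0, sizeof(info));
	info.jited_prog_len = *len;
	info.jited_prog_insns = (__u64)(unsigned long)buf;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len) ||
	    info.jited_prog_len == 0 || !info.jited_prog_insns) {
		free(buf);	/* check the pointer too, as the fix does */
		return NULL;
	}
	return buf;
}

The series closes with a new selftest script, shown below, which enables the
function_graph tracer before running the fentry and fexit test programs,
exercising the trampoline/ftrace coexistence from (1) end to end: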
#!/bin/bash
TR=/sys/kernel/debug/tracing/
clear_trace() { # reset trace output
echo > $TR/trace
}
disable_tracing() { # stop trace recording
echo 0 > $TR/tracing_on
}
enable_tracing() { # start trace recording
echo 1 > $TR/tracing_on
}
reset_tracer() { # reset the current tracer
echo nop > $TR/current_tracer
}
disable_tracing
clear_trace
echo "" > $TR/set_ftrace_filter
echo '*printk* *console* *wake* *serial* *lock*' > $TR/set_ftrace_notrace
echo "bpf_prog_test*" > $TR/set_graph_function
echo "" > $TR/set_graph_notrace
echo function_graph > $TR/current_tracer
enable_tracing
./test_progs -t fentry
./test_progs -t fexit
disable_tracing
clear_trace
reset_tracer
exit 0