Commit 8e7a3920 authored by Daniel Borkmann, committed by David S. Miller

bpf, perf: split bpf_perf_event_output

As a preparation, split the bpf_perf_event_output() helper into
two parts. The new bpf_perf_event_output() prepares the raw
record itself and rejects unknown flags from the BPF trace
context, while __bpf_perf_event_output() does the core work. The
latter will later be reused directly from bpf_event_output().
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 7e3f977e
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -233,26 +233,17 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+static __always_inline u64
+__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+			u64 flags, struct perf_raw_record *raw)
 {
-	struct pt_regs *regs = (struct pt_regs *) (long) r1;
-	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
-	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
-	struct perf_raw_record raw = {
-		.frag = {
-			.size = size,
-			.data = data,
-		},
-	};
 
-	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
-		return -EINVAL;
 	if (index == BPF_F_CURRENT_CPU)
 		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
@@ -271,11 +262,29 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 		return -EOPNOTSUPP;
 
 	perf_sample_data_init(&sample_data, 0, 0);
-	sample_data.raw = &raw;
+	sample_data.raw = raw;
 	perf_event_output(event, &sample_data, regs);
 	return 0;
 }
 
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+{
+	struct pt_regs *regs = (struct pt_regs *)(long) r1;
+	struct bpf_map *map = (struct bpf_map *)(long) r2;
+	void *data = (void *)(long) r4;
+	struct perf_raw_record raw = {
+		.frag = {
+			.size = size,
+			.data = data,
+		},
+	};
+
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+		return -EINVAL;
+
+	return __bpf_perf_event_output(regs, map, flags, &raw);
+}
+
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
 	.func		= bpf_perf_event_output,
 	.gpl_only	= true,
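For context on where the split is headed: factoring out __bpf_perf_event_output() means a caller that is not entered with a BPF trace context can build its own perf_raw_record and registers and hand them straight to the core helper. The sketch below illustrates that shape only; it is not part of this commit, and the bpf_event_output() signature and the per-CPU pt_regs scratch area are assumptions for illustration.

/* Hypothetical sketch, not part of this commit: a later
 * bpf_event_output()-style caller reusing the split-out core helper.
 * Only __bpf_perf_event_output() comes from this patch; the rest of
 * the names here are assumed.
 */
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *data, u64 size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	/* Mirror the thin-wrapper pattern: validate flags here, keep
	 * the core emit path free of per-entry-point checks.
	 */
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	/* No trace context supplies a pt_regs here, so snapshot the
	 * caller's registers before handing off to the core helper.
	 */
	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

Keeping record preparation and flag validation in the thin wrappers lets each entry point evolve independently while the actual perf output path stays shared.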