Commit 97562633 authored by Yonghong Song, committed by David S. Miller

bpf: perf event change needed for subsequent bpf helpers

This patch does not impact existing functionality.
It contains the changes in the perf event area needed for
the subsequent bpf_perf_event_read_value and
bpf_perf_prog_read_value helpers.
Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent bdc47641
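As the diff below shows, the extended perf_event_read_local() takes two optional out-parameters: existing callers pass NULL and keep their old behavior, while the upcoming helpers can fetch the counter value together with its enabled and running times in one IRQ-safe call. A minimal caller sketch, assuming a hypothetical kernel-side helper (the function name and the pr_debug reporting are illustrative, not part of this commit):

#include <linux/perf_event.h>
#include <linux/printk.h>

/* Hypothetical caller of the extended API; error handling kept minimal. */
static int report_counter_with_times(struct perf_event *event)
{
	u64 value, enabled, running;
	int err;

	/* NULL may be passed for any time value the caller does not need. */
	err = perf_event_read_local(event, &value, &enabled, &running);
	if (err)
		return err;

	/*
	 * 'enabled' is how long the event has been enabled, 'running' how
	 * long it was actually scheduled on the PMU; their ratio exposes
	 * counter multiplexing to the caller.
	 */
	pr_debug("value=%llu enabled=%llu running=%llu\n",
		 value, enabled, running);
	return 0;
}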
@@ -806,6 +806,7 @@ struct perf_output_handle {
 struct bpf_perf_event_data_kern {
 	struct pt_regs *regs;
 	struct perf_sample_data *data;
+	struct perf_event *event;
 };
 
 #ifdef CONFIG_CGROUP_PERF
@@ -884,7 +885,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value);
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
@@ -1286,7 +1288,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *
 {
 	return ERR_PTR(-EINVAL);
 }
 
-static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+static inline int perf_event_read_local(struct perf_event *event, u64 *value,
+					u64 *enabled, u64 *running)
 {
 	return -EINVAL;
 }
...
@@ -492,7 +492,7 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
 	ee = ERR_PTR(-EOPNOTSUPP);
 	event = perf_file->private_data;
-	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
+	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
 		goto err_out;
 
 	ee = bpf_event_entry_gen(perf_file, map_file);
...
@@ -3684,10 +3684,12 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-int perf_event_read_local(struct perf_event *event, u64 *value)
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running)
 {
 	unsigned long flags;
 	int ret = 0;
+	u64 now;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3718,13 +3720,21 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
 		goto out;
 	}
 
+	now = event->shadow_ctx_time + perf_clock();
+	if (enabled)
+		*enabled = now - event->tstamp_enabled;
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
 	 * oncpu == -1).
 	 */
-	if (event->oncpu == smp_processor_id())
+	if (event->oncpu == smp_processor_id()) {
 		event->pmu->read(event);
+		if (running)
+			*running = now - event->tstamp_running;
+	} else if (running) {
+		*running = event->total_time_running;
+	}
 
 	*value = local64_read(&event->count);
 out:
@@ -8072,6 +8082,7 @@ static void bpf_overflow_handler(struct perf_event *event,
 	struct bpf_perf_event_data_kern ctx = {
 		.data = data,
 		.regs = regs,
+		.event = event,
 	};
 	int ret = 0;
...
@@ -275,7 +275,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	if (!ee)
 		return -ENOENT;
 
-	err = perf_event_read_local(ee->event, &value);
+	err = perf_event_read_local(ee->event, &value, NULL, NULL);
 	/*
 	 * this api is ugly since we miss [-22..-2] range of valid
 	 * counter values, but that's uapi
...
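The commit message points at bpf_perf_event_read_value and bpf_perf_prog_read_value as the consumers of this change. A rough sketch of how such a helper could use the new signature, assuming a value struct with counter/enabled/running fields (the struct layout and wrapper name below are assumptions based on the commit message, not code from this patch):

/* Assumed layout for the value handed back to BPF programs. */
struct bpf_perf_event_value {
	__u64 counter;
	__u64 enabled;
	__u64 running;
};

static int read_event_value(struct perf_event *event,
			    struct bpf_perf_event_value *buf)
{
	/* One IRQ-safe read fills the counter and both times. */
	return perf_event_read_local(event, &buf->counter,
				     &buf->enabled, &buf->running);
}

This is also why the NULL, NULL arguments in the arraymap.c and bpf_trace.c hunks above suffice for the existing bpf_perf_event_read path: it only needs the raw counter value.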