Commit f91840a3 authored by Alexei Starovoitov, committed by David S. Miller

perf, bpf: Add BPF support to all perf_event types

Allow BPF_PROG_TYPE_PERF_EVENT program types to attach to all
perf_event types, including HW_CACHE, RAW, and dynamic pmu events.
Only tracepoint/kprobe events are treated differently, as they require
the BPF_PROG_TYPE_TRACEPOINT/BPF_PROG_TYPE_KPROBE program types respectively.

Also add support for reading all event counters using
bpf_perf_event_read() helper.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 5071034e
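For context, a minimal user-space sketch of what this commit enables: attaching a BPF_PROG_TYPE_PERF_EVENT program to a plain hardware cycles event via the standard perf_event_open(2) and PERF_EVENT_IOC_SET_BPF interfaces. The loader below is illustrative only (the helper name attach_bpf_to_hw_event is made up and error handling is abbreviated), not part of this commit:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a HW cycles event on one CPU and attach an already-loaded
 * BPF_PROG_TYPE_PERF_EVENT program to it. Before this commit only
 * PERF_TYPE_HARDWARE/PERF_TYPE_SOFTWARE were accepted for BPF attach;
 * now every non-tracepoint attr.type takes the same path. */
static int attach_bpf_to_hw_event(int prog_fd, int cpu)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_HARDWARE,
		.config        = PERF_COUNT_HW_CPU_CYCLES,
		.sample_period = 100000,
		.size          = sizeof(struct perf_event_attr),
	};
	int fd = syscall(__NR_perf_event_open, &attr,
			 -1 /* pid: all tasks */, cpu, -1 /* group */, 0);

	if (fd < 0)
		return -1;
	if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd) ||
	    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0)) {
		close(fd);
		return -1;
	}
	return fd;
}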
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -896,7 +896,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
-extern u64 perf_event_read_local(struct perf_event *event);
+int perf_event_read_local(struct perf_event *event, u64 *value);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
@@ -1301,7 +1301,10 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
+static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+{
+	return -EINVAL;
+}
 static inline void perf_event_print_debug(void) { }
 static inline int perf_event_task_disable(void) { return -EINVAL; }
 static inline int perf_event_task_enable(void) { return -EINVAL; }
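For in-kernel callers, the new calling convention looks like this (a minimal sketch; the surrounding caller is hypothetical). Errors now travel through the return value while the counter goes through the output argument, so an error can no longer be mistaken for a legitimate u64 counter reading:

	u64 value;
	int err;

	err = perf_event_read_local(event, &value);	/* new signature */
	if (err)
		return err;	/* -EOPNOTSUPP or -EINVAL, see core.c below */
	/* 'value' now unambiguously holds the counter */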
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -452,38 +452,24 @@ static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
 static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
 					 struct file *map_file, int fd)
 {
-	const struct perf_event_attr *attr;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
 	struct file *perf_file;
+	u64 value;
 
 	perf_file = perf_event_get(fd);
 	if (IS_ERR(perf_file))
 		return perf_file;
 
+	ee = ERR_PTR(-EOPNOTSUPP);
 	event = perf_file->private_data;
-	ee = ERR_PTR(-EINVAL);
-
-	attr = perf_event_attrs(event);
-	if (IS_ERR(attr) || attr->inherit)
+	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
 		goto err_out;
 
-	switch (attr->type) {
-	case PERF_TYPE_SOFTWARE:
-		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
-			goto err_out;
-		/* fall-through */
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-		ee = bpf_event_entry_gen(perf_file, map_file);
-		if (ee)
-			return ee;
-		ee = ERR_PTR(-ENOMEM);
-		/* fall-through */
-	default:
-		break;
-	}
+	ee = bpf_event_entry_gen(perf_file, map_file);
+	if (ee)
+		return ee;
+	ee = ERR_PTR(-ENOMEM);
 err_out:
 	fput(perf_file);
 	return ee;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3636,10 +3636,10 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-u64 perf_event_read_local(struct perf_event *event)
+int perf_event_read_local(struct perf_event *event, u64 *value)
 {
 	unsigned long flags;
-	u64 val;
+	int ret = 0;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3647,25 +3647,37 @@ u64 perf_event_read_local(struct perf_event *event)
 	 */
 	local_irq_save(flags);
 
-	/* If this is a per-task event, it must be for current */
-	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
-		     event->hw.target != current);
-
-	/* If this is a per-CPU event, it must be for this CPU */
-	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
-		     event->cpu != smp_processor_id());
-
 	/*
 	 * It must not be an event with inherit set, we cannot read
 	 * all child counters from atomic context.
 	 */
-	WARN_ON_ONCE(event->attr.inherit);
+	if (event->attr.inherit) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
 
 	/*
 	 * It must not have a pmu::count method, those are not
 	 * NMI safe.
 	 */
-	WARN_ON_ONCE(event->pmu->count);
+	if (event->pmu->count) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/* If this is a per-task event, it must be for current */
+	if ((event->attach_state & PERF_ATTACH_TASK) &&
+	    event->hw.target != current) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* If this is a per-CPU event, it must be for this CPU */
+	if (!(event->attach_state & PERF_ATTACH_TASK) &&
+	    event->cpu != smp_processor_id()) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
@@ -3675,10 +3687,11 @@ u64 perf_event_read_local(struct perf_event *event)
 	if (event->oncpu == smp_processor_id())
 		event->pmu->read(event);
 
-	val = local64_read(&event->count);
+	*value = local64_read(&event->count);
+out:
 	local_irq_restore(flags);
 
-	return val;
+	return ret;
 }
 
 static int perf_event_read(struct perf_event *event, bool group)
@@ -8037,12 +8050,8 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 	bool is_kprobe, is_tracepoint;
 	struct bpf_prog *prog;
 
-	if (event->attr.type == PERF_TYPE_HARDWARE ||
-	    event->attr.type == PERF_TYPE_SOFTWARE)
-		return perf_event_set_bpf_handler(event, prog_fd);
-
 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
-		return -EINVAL;
+		return perf_event_set_bpf_handler(event, prog_fd);
 
 	if (event->tp_event->prog)
 		return -EEXIST;
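With perf_event_set_bpf_prog() rerouted this way, any non-tracepoint event can carry a BPF_PROG_TYPE_PERF_EVENT program. For illustration, a minimal sketch of such a program in the style of samples/bpf (the SEC macro definition and the section/function names here are illustrative conventions, not part of this commit):

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>

#define SEC(NAME) __attribute__((section(NAME), used))

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* invoked on each overflow of the event the program is
	 * attached to; ctx exposes regs and sample_period */
	return 0;
}

char _license[] SEC("license") = "GPL";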
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -234,7 +234,8 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
-	struct perf_event *event;
+	u64 value = 0;
+	int err;
 
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
@@ -247,21 +248,14 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	if (!ee)
 		return -ENOENT;
 
-	event = ee->event;
-	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
-		     event->attr.type != PERF_TYPE_RAW))
-		return -EINVAL;
-
-	/* make sure event is local and doesn't have pmu::count */
-	if (unlikely(event->oncpu != cpu || event->pmu->count))
-		return -EINVAL;
-
+	err = perf_event_read_local(ee->event, &value);
 	/*
-	 * we don't know if the function is run successfully by the
-	 * return value. It can be judged in other places, such as
-	 * eBPF programs.
+	 * this api is ugly since we miss [-22..-2] range of valid
+	 * counter values, but that's uapi
 	 */
-	return perf_event_read_local(event);
+	if (err)
+		return err;
+	return value;
 }
 
 static const struct bpf_func_proto bpf_perf_event_read_proto = {
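On the consuming side, a hedged sketch of how a BPF program might call the helper under the new semantics (samples/bpf-style; the map name, section name, and kprobe target are illustrative). The range check reflects the uapi wart noted in the comment above: a return value in [-22..-2] could in principle be either an error code or a very large counter value, so a caller that cares simply treats that window as an error:

struct bpf_map_def SEC("maps") counters = {
	.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(u32),
	.max_entries = 64,
};

SEC("kprobe/finish_task_switch")
int read_counter(struct pt_regs *ctx)
{
	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	/* treat the ambiguous [-22..-2] window as an error */
	if ((s64)cnt < 0 && (s64)cnt >= -22)
		return 0;
	/* ... use cnt ... */
	return 0;
}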