Commit 766d6c07 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Factor out event accounting code to account_event()/__free_event()

Gather all the event accounting code to a single place,
once all the prerequisites are completed. This simplifies
the refcounting.
Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 90983b16
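The shape of the change can be sketched outside the kernel as well. Below is a minimal, hypothetical userspace C example, not the kernel code: the names fake_event, account_fake_event(), __free_fake_event(), free_fake_event() and the two counters are invented for illustration. The idea it mirrors is the one the patch applies: bump the global counters in exactly one helper, only after every step that can fail has succeeded, and keep a bare teardown helper that error paths can call before any accounting has happened.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Stand-ins for the global event counters. */
static int nr_task_events;
static int nr_mmap_events;

struct fake_event {
	bool counts_tasks;
	bool counts_mmap;
	void *buffer;		/* a resource acquired before accounting */
};

/* Single place that bumps every counter the event contributes to,
 * called only once event creation can no longer fail. */
static void account_fake_event(struct fake_event *event)
{
	if (event->counts_tasks)
		nr_task_events++;
	if (event->counts_mmap)
		nr_mmap_events++;
}

/* Bare teardown: releases resources only, never touches the counters,
 * so it is safe on error paths that never reached accounting. */
static void __free_fake_event(struct fake_event *event)
{
	free(event->buffer);
	free(event);
}

/* Full teardown for an accounted event: undo the counters, then free. */
static void free_fake_event(struct fake_event *event)
{
	if (event->counts_tasks)
		nr_task_events--;
	if (event->counts_mmap)
		nr_mmap_events--;
	__free_fake_event(event);
}

static struct fake_event *alloc_fake_event(bool tasks, bool mmap)
{
	struct fake_event *event = calloc(1, sizeof(*event));

	if (!event)
		return NULL;
	event->counts_tasks = tasks;
	event->counts_mmap = mmap;

	event->buffer = malloc(64);
	if (!event->buffer) {			/* a prerequisite failed...   */
		__free_fake_event(event);	/* ...nothing accounted yet   */
		return NULL;
	}

	account_fake_event(event);		/* account only after success */
	return event;
}

int main(void)
{
	struct fake_event *event = alloc_fake_event(true, false);

	if (!event)
		return 1;
	printf("task events: %d, mmap events: %d\n",
	       nr_task_events, nr_mmap_events);
	free_fake_event(event);
	return 0;
}

In the patch itself, perf_event_open() follows the same shape: __free_event() is used on the perf_cgroup_connect() error path, and account_event() runs only after the event has been fully allocated.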
@@ -3128,6 +3128,21 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void __free_event(struct perf_event *event)
+{
+	if (!event->parent) {
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+			put_callchain_buffers();
+	}
+
+	if (event->destroy)
+		event->destroy(event);
+
+	if (event->ctx)
+		put_ctx(event->ctx);
+
+	call_rcu(&event->rcu_head, free_event_rcu);
+}
 static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
@@ -3141,8 +3156,6 @@ static void free_event(struct perf_event *event)
 			atomic_dec(&nr_comm_events);
 		if (event->attr.task)
 			atomic_dec(&nr_task_events);
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
 			static_key_slow_dec_deferred(&perf_sched_events);
@@ -3180,13 +3193,8 @@ static void free_event(struct perf_event *event)
 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);
 
-	if (event->destroy)
-		event->destroy(event);
-
-	if (event->ctx)
-		put_ctx(event->ctx);
-
-	call_rcu(&event->rcu_head, free_event_rcu);
+	__free_event(event);
 }
 
 int perf_event_release_kernel(struct perf_event *event)
@@ -6443,6 +6451,29 @@ struct pmu *perf_init_event(struct perf_event *event)
 	return pmu;
 }
 
+static void account_event(struct perf_event *event)
+{
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_inc(&perf_sched_events.key);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_inc(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_inc(&nr_comm_events);
+	if (event->attr.task)
+		atomic_inc(&nr_task_events);
+	if (has_branch_stack(event)) {
+		static_key_slow_inc(&perf_sched_events.key);
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events,
+					    event->cpu));
+	}
+
+	if (is_cgroup_event(event)) {
+		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+		static_key_slow_inc(&perf_sched_events.key);
+	}
+}
+
 /*
  * Allocate and initialize a event structure
  */
@@ -6556,21 +6587,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		if (err)
 			goto err_pmu;
 	}
-
-	if (event->attach_state & PERF_ATTACH_TASK)
-		static_key_slow_inc(&perf_sched_events.key);
-	if (event->attr.mmap || event->attr.mmap_data)
-		atomic_inc(&nr_mmap_events);
-	if (event->attr.comm)
-		atomic_inc(&nr_comm_events);
-	if (event->attr.task)
-		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
-		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
 	}
 
 	return event;
@@ -6865,17 +6881,14 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	if (flags & PERF_FLAG_PID_CGROUP) {
 		err = perf_cgroup_connect(pid, event, &attr, group_leader);
-		if (err)
-			goto err_alloc;
-		/*
-		 * one more event:
-		 * - that has cgroup constraint on event->cpu
-		 * - that may need work on context switch
-		 */
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		static_key_slow_inc(&perf_sched_events.key);
+		if (err) {
+			__free_event(event);
+			goto err_task;
+		}
 	}
 
+	account_event(event);
+
 	/*
 	 * Special case software events and allow them to be part of
 	 * any hardware group.
@@ -7071,6 +7084,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err;
 	}
 
+	account_event(event);
+
 	ctx = find_get_context(event->pmu, task, cpu);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);