Commit 4beb31f3 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Split the per-cpu accounting part of the event accounting code

This way we can use the per-cpu handling separately.
This is going to be used to fix the event migration
code accounting.
Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 766d6c07
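For context, the point of the split is that the per-cpu side becomes reusable on its own. A minimal sketch of how a migration path could pair the two helpers; the function name and the omission of locking/context handling here are illustrative, not the actual migration fix:

	/*
	 * Illustrative sketch only: move one event's per-cpu accounting
	 * from src_cpu to dst_cpu. The global counters (nr_mmap_events,
	 * perf_sched_events, ...) are left untouched, which is exactly
	 * what splitting out the per-cpu part enables. Locking and
	 * context handling are omitted.
	 */
	static void sketch_migrate_event_accounting(struct perf_event *event,
						    int src_cpu, int dst_cpu)
	{
		unaccount_event_cpu(event, src_cpu);	/* drop counts on the source CPU */
		account_event_cpu(event, dst_cpu);	/* re-add them on the destination */
	}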
@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
@@ -6451,8 +6463,24 @@ struct pmu *perf_init_event(struct perf_event *event)
 	return pmu;
 }
 
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*