commit 8bc20959 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Fix inheritance cleanup code

Clean up the code in __perf_counter_exit_task() that open-coded the
list_{add,del}_counter() operations and had consequently diverged. This
could lead to software counter crashes.

Also, fold the ctx->nr_counters inc/dec into those functions and clean
up some of the related code.

[ Impact: fix potential sw counter crash, cleanup ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0bbd0d4b
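
The gist of the cleanup, before the diff: list_add_counter() and list_del_counter() now own the ctx->nr_counters bookkeeping, so no call site can forget (or double-apply) the matching increment or decrement. A minimal userspace sketch of that pattern follows; the struct counter/struct context types and the helper bodies are simplified stand-ins, not the kernel's actual structures:

```c
#include <assert.h>

/* Hypothetical stand-ins for the kernel's perf_counter structures. */
struct counter {
        struct counter *next, *prev;    /* list_entry */
};

struct context {
        struct counter head;            /* counter_list */
        int nr_counters;
};

/* The helpers own the bookkeeping, so add/del paths can never diverge. */
static void list_add_counter(struct counter *c, struct context *ctx)
{
        c->next = ctx->head.next;
        c->prev = &ctx->head;
        ctx->head.next->prev = c;
        ctx->head.next = c;
        ctx->nr_counters++;             /* folded in, as in this patch */
}

static void list_del_counter(struct counter *c, struct context *ctx)
{
        ctx->nr_counters--;             /* folded in, as in this patch */
        c->prev->next = c->next;
        c->next->prev = c->prev;
        c->next = c->prev = c;          /* list_del_init() leaves it self-linked */
}

int main(void)
{
        struct context ctx = { .head = { &ctx.head, &ctx.head }, .nr_counters = 0 };
        struct counter c = { &c, &c };

        list_add_counter(&c, &ctx);
        assert(ctx.nr_counters == 1);
        list_del_counter(&c, &ctx);
        assert(ctx.nr_counters == 0);
        return 0;
}
```

The design point is simply that an invariant maintained in exactly one add path and one del path cannot drift the way the open-coded copies in __perf_counter_exit_task() did.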
@@ -115,6 +115,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	}
 
 	list_add_rcu(&counter->event_entry, &ctx->event_list);
+	ctx->nr_counters++;
 }
 
 static void
@@ -122,6 +123,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
 	struct perf_counter *sibling, *tmp;
 
+	ctx->nr_counters--;
+
 	list_del_init(&counter->list_entry);
 	list_del_rcu(&counter->event_entry);
 
@@ -209,7 +212,6 @@ static void __perf_counter_remove_from_context(void *info)
 	counter_sched_out(counter, cpuctx, ctx);
 
 	counter->task = NULL;
-	ctx->nr_counters--;
 
 	/*
 	 * Protect the list operation against NMI by disabling the
@@ -276,7 +278,6 @@ static void perf_counter_remove_from_context(struct perf_counter *counter)
 	 * succeed.
 	 */
 	if (!list_empty(&counter->list_entry)) {
-		ctx->nr_counters--;
 		list_del_counter(counter, ctx);
 		counter->task = NULL;
 	}
@@ -544,7 +545,6 @@ static void add_counter_to_ctx(struct perf_counter *counter,
 			       struct perf_counter_context *ctx)
 {
 	list_add_counter(counter, ctx);
-	ctx->nr_counters++;
 	counter->prev_state = PERF_COUNTER_STATE_OFF;
 	counter->tstamp_enabled = ctx->time;
 	counter->tstamp_running = ctx->time;
@@ -3206,9 +3206,8 @@ static int inherit_group(struct perf_counter *parent_counter,
 static void sync_child_counter(struct perf_counter *child_counter,
 			       struct perf_counter *parent_counter)
 {
-	u64 parent_val, child_val;
+	u64 child_val;
 
-	parent_val = atomic64_read(&parent_counter->count);
 	child_val = atomic64_read(&child_counter->count);
 
 	/*
@@ -3240,7 +3239,6 @@ __perf_counter_exit_task(struct task_struct *child,
 			 struct perf_counter_context *child_ctx)
 {
 	struct perf_counter *parent_counter;
-	struct perf_counter *sub, *tmp;
 
 	/*
 	 * If we do not self-reap then we have to wait for the
@@ -3252,8 +3250,8 @@ __perf_counter_exit_task(struct task_struct *child,
 	 */
 	if (child != current) {
 		wait_task_inactive(child, 0);
-		list_del_init(&child_counter->list_entry);
 		update_counter_times(child_counter);
+		list_del_counter(child_counter, child_ctx);
 	} else {
 		struct perf_cpu_context *cpuctx;
 		unsigned long flags;
@@ -3272,9 +3270,7 @@ __perf_counter_exit_task(struct task_struct *child,
 		group_sched_out(child_counter, cpuctx, child_ctx);
 		update_counter_times(child_counter);
 
-		list_del_init(&child_counter->list_entry);
-
-		child_ctx->nr_counters--;
+		list_del_counter(child_counter, child_ctx);
 
 		perf_enable();
 		local_irq_restore(flags);
@@ -3288,13 +3284,6 @@ __perf_counter_exit_task(struct task_struct *child,
 	 */
 	if (parent_counter) {
 		sync_child_counter(child_counter, parent_counter);
-		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
-					 list_entry) {
-			if (sub->parent) {
-				sync_child_counter(sub, sub->parent);
-				free_counter(sub);
-			}
-		}
 		free_counter(child_counter);
 	}
 }
@@ -3315,9 +3304,18 @@ void perf_counter_exit_task(struct task_struct *child)
 	if (likely(!child_ctx->nr_counters))
 		return;
 
+again:
 	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 				 list_entry)
 		__perf_counter_exit_task(child, child_counter, child_ctx);
+
+	/*
+	 * If the last counter was a group counter, it will have appended all
+	 * its siblings to the list, but we obtained 'tmp' before that which
+	 * will still point to the list head terminating the iteration.
+	 */
+	if (!list_empty(&child_ctx->counter_list))
+		goto again;
 }
 
 /*
...
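
The goto-again loop added at the end of perf_counter_exit_task() is the subtle part of the patch: reaping a group leader splices its sibling counters onto the context's counter_list, but list_for_each_entry_safe() has already cached its 'tmp' pointer, so a single pass can hit the list head and stop before those siblings are reaped. A minimal userspace sketch of that restart pattern, with hypothetical list/counter types in place of the kernel's:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* A tiny circular doubly linked list, shaped like the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h)  { h->next = h->prev = h; }
static bool list_empty(struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

/* Hypothetical counter: reaping a group leader splices its sibling
 * onto the context list, after the iterator has cached its 'tmp'. */
struct counter {
        struct list_head entry;
        struct counter *sibling;
        bool reaped;
};

#define entry_of(p) ((struct counter *)((char *)(p) - offsetof(struct counter, entry)))

static void exit_one(struct list_head *ctx_list, struct counter *c)
{
        list_del(&c->entry);
        c->reaped = true;
        if (c->sibling) {
                list_add(&c->sibling->entry, ctx_list);
                c->sibling = NULL;
        }
}

static void exit_all(struct list_head *ctx_list)
{
        struct list_head *pos, *tmp;
again:
        /* like list_for_each_entry_safe(): 'tmp' is cached up front */
        for (pos = ctx_list->next, tmp = pos->next; pos != ctx_list;
             pos = tmp, tmp = pos->next)
                exit_one(ctx_list, entry_of(pos));
        /* A reaped leader appended its sibling behind the cached 'tmp';
         * restart until the list is really empty. */
        if (!list_empty(ctx_list))
                goto again;
}

int main(void)
{
        struct list_head ctx_list;
        struct counter sib    = { .sibling = NULL, .reaped = false };
        struct counter leader = { .sibling = &sib,  .reaped = false };

        list_init(&ctx_list);
        list_init(&sib.entry);
        list_add(&leader.entry, &ctx_list);

        exit_all(&ctx_list);
        assert(leader.reaped && sib.reaped);  /* one pass alone would miss 'sib' */
        return 0;
}
```

Without the restart, 'sib' in the sketch would never be reaped; that mirrors the counters the patch keeps from being leaked by re-checking list_empty() after each pass.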