提交 cde8e884 编写于 作者: P Peter Zijlstra 提交者: Ingo Molnar

perf: Sanitize the RCU logic

Simplify things and simply synchronize against two RCU variants for
PMU unregister -- we don't care about performance, it's module unload
if anything.
Reported-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 b0b2072d
...@@ -3810,7 +3810,7 @@ static void perf_event_task_event(struct perf_task_event *task_event) ...@@ -3810,7 +3810,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
struct pmu *pmu; struct pmu *pmu;
int ctxn; int ctxn;
rcu_read_lock_sched(); rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) { list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
perf_event_task_ctx(&cpuctx->ctx, task_event); perf_event_task_ctx(&cpuctx->ctx, task_event);
...@@ -3825,7 +3825,7 @@ static void perf_event_task_event(struct perf_task_event *task_event) ...@@ -3825,7 +3825,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
if (ctx) if (ctx)
perf_event_task_ctx(ctx, task_event); perf_event_task_ctx(ctx, task_event);
} }
rcu_read_unlock_sched(); rcu_read_unlock();
} }
static void perf_event_task(struct task_struct *task, static void perf_event_task(struct task_struct *task,
...@@ -3943,7 +3943,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) ...@@ -3943,7 +3943,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
rcu_read_lock_sched(); rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) { list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
perf_event_comm_ctx(&cpuctx->ctx, comm_event); perf_event_comm_ctx(&cpuctx->ctx, comm_event);
...@@ -3956,7 +3956,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) ...@@ -3956,7 +3956,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
if (ctx) if (ctx)
perf_event_comm_ctx(ctx, comm_event); perf_event_comm_ctx(ctx, comm_event);
} }
rcu_read_unlock_sched(); rcu_read_unlock();
} }
void perf_event_comm(struct task_struct *task) void perf_event_comm(struct task_struct *task)
...@@ -4126,7 +4126,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) ...@@ -4126,7 +4126,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
rcu_read_lock_sched(); rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) { list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
...@@ -4142,7 +4142,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) ...@@ -4142,7 +4142,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
vma->vm_flags & VM_EXEC); vma->vm_flags & VM_EXEC);
} }
} }
rcu_read_unlock_sched(); rcu_read_unlock();
kfree(buf); kfree(buf);
} }
...@@ -5218,10 +5218,11 @@ void perf_pmu_unregister(struct pmu *pmu) ...@@ -5218,10 +5218,11 @@ void perf_pmu_unregister(struct pmu *pmu)
mutex_unlock(&pmus_lock); mutex_unlock(&pmus_lock);
/* /*
* We use the pmu list either under SRCU or preempt_disable, * We dereference the pmu list under both SRCU and regular RCU, so
* synchronize_srcu() implies synchronize_sched() so we're good. * synchronize against both of those.
*/ */
synchronize_srcu(&pmus_srcu); synchronize_srcu(&pmus_srcu);
synchronize_rcu();
free_percpu(pmu->pmu_disable_count); free_percpu(pmu->pmu_disable_count);
free_pmu_context(pmu->pmu_cpu_context); free_pmu_context(pmu->pmu_cpu_context);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册