Commit c079c791 authored by Peter Zijlstra, committed by Ingo Molnar

perf, amd: Remove the nb lock

Since all the hotplug stuff is serialized by the hotplug mutex,
do away with the amd_nb_lock.

Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent b38aa896
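For context, a condensed sketch of how amd_pmu_cpu_starting() reads after this patch, pieced together from the hunks below rather than copied verbatim from the file: the loop body that lets CPUs on the same northbridge share one struct amd_nb is elided and only summarized in a comment, and the cpuc initialization shown is an assumption. The point of the sketch is that the node-sharing walk and the refcount update now rely solely on the serialization already provided by the CPU-hotplug machinery.

/*
 * Sketch only (condensed from this commit's hunks): amd_pmu_cpu_starting()
 * with amd_nb_lock removed.  It runs from the CPU_STARTING notifier, which
 * is serialized by the hotplug mutex, so no extra lock is needed around
 * the northbridge sharing and the refcount update.
 */
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);	/* assumed initialization */
	struct amd_nb *nb;
	int i, nb_id;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	/* raw_spin_lock(&amd_nb_lock) used to be taken here. */
	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		/*
		 * ... if nb belongs to the same northbridge (nb->nb_id == nb_id),
		 * adopt it for this CPU; unchanged by this patch, elided here ...
		 */
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
	/* raw_spin_unlock(&amd_nb_lock) used to be dropped here. */
}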
 #ifdef CONFIG_CPU_SUP_AMD
 
-static DEFINE_RAW_SPINLOCK(amd_nb_lock);
-
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -275,7 +273,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	return &emptyconstraint;
 }
 
-static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
+static struct amd_nb *amd_alloc_nb(int cpu)
 {
 	struct amd_nb *nb;
 	int i;
@@ -285,7 +283,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	if (!nb)
 		return NULL;
 
-	nb->nb_id = nb_id;
+	nb->nb_id = -1;
 
 	/*
 	 * initialize all possible NB constraints
@@ -306,7 +304,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 	if (boot_cpu_data.x86_max_cores < 2)
 		return NOTIFY_OK;
 
-	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	cpuc->amd_nb = amd_alloc_nb(cpu);
 	if (!cpuc->amd_nb)
 		return NOTIFY_BAD;
 
@@ -325,8 +323,6 @@ static void amd_pmu_cpu_starting(int cpu)
 	nb_id = amd_get_nb_id(cpu);
 	WARN_ON_ONCE(nb_id == BAD_APICID);
 
-	raw_spin_lock(&amd_nb_lock);
-
 	for_each_online_cpu(i) {
 		nb = per_cpu(cpu_hw_events, i).amd_nb;
 		if (WARN_ON_ONCE(!nb))
@@ -341,8 +337,6 @@ static void amd_pmu_cpu_starting(int cpu)
 
 	cpuc->amd_nb->nb_id = nb_id;
 	cpuc->amd_nb->refcnt++;
-
-	raw_spin_unlock(&amd_nb_lock);
 }
 
 static void amd_pmu_cpu_dead(int cpu)
@@ -354,8 +348,6 @@ static void amd_pmu_cpu_dead(int cpu)
 
 	cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-	raw_spin_lock(&amd_nb_lock);
-
 	if (cpuhw->amd_nb) {
 		struct amd_nb *nb = cpuhw->amd_nb;
 
@@ -364,8 +356,6 @@ static void amd_pmu_cpu_dead(int cpu)
 		cpuhw->amd_nb = NULL;
 	}
 
-	raw_spin_unlock(&amd_nb_lock);
 }
 
 static __initconst const struct x86_pmu amd_pmu = {