Commit b38b24ea authored by Peter Zijlstra, committed by Ingo Molnar

perf, x86: Fix AMD hotplug & constraint initialization

Commit 3f6da390 ("perf: Rework and fix the arch CPU-hotplug hooks") moved
the AMD northbridge allocation from CPU_ONLINE to CPU_UP_PREPARE; however,
amd_nb_id() does not work yet at prepare time, so the code would simply bail,
basically reverting to a state where we do not properly track node-wide
constraints - causing weird perf results.

Fix up the AMD northbridge initialization code by allocating the structure
from CPU_UP_PREPARE and installing it from CPU_STARTING, once the proper
nb_id is known. The code now also properly deals with the allocation failing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
[ robustify using amd_has_nb() ]
Signed-off-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <1269353485.5109.48.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 85257024
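Before the diff, a condensed picture of the allocation scheme: CPU_UP_PREPARE allocates a provisional northbridge structure per CPU (node id still unknown, and the allocation may fail, returning NOTIFY_BAD); CPU_STARTING resolves the node id and either adopts an already-installed structure for that node (bumping its refcount and freeing the provisional one) or installs its own; CPU_DEAD / CPU_UP_CANCELED drops the reference, freeing the structure once the count hits zero or if it never received a valid node id. The sketch below is a minimal stand-alone user-space model of that scheme, not the kernel code itself; the CPU count, the cpu-to-node mapping and the names (cpu_prepare/cpu_starting/cpu_dead, struct nb) are invented for illustration.

/*
 * Build with: gcc -Wall model.c
 * Models only the refcounted per-node structure sharing from this patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct nb {
	int nb_id;		/* node id, -1 while still provisional */
	int refcnt;		/* number of CPUs sharing this struct */
};

static struct nb *cpu_nb[NR_CPUS];

/* CPU_UP_PREPARE: allocation may fail; the node id is not known yet. */
static int cpu_prepare(int cpu)
{
	struct nb *nb = calloc(1, sizeof(*nb));

	if (!nb)
		return -1;		/* the patch returns NOTIFY_BAD here */
	nb->nb_id = -1;			/* provisional, installed later */
	cpu_nb[cpu] = nb;
	return 0;			/* NOTIFY_OK */
}

/* CPU_STARTING: the node id is known; share one struct per node. */
static void cpu_starting(int cpu, int nb_id)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		struct nb *nb = cpu_nb[i];

		if (i == cpu || !nb)
			continue;
		if (nb->nb_id == nb_id) {
			free(cpu_nb[cpu]);	/* drop the provisional one */
			cpu_nb[cpu] = nb;
			break;
		}
	}
	cpu_nb[cpu]->nb_id = nb_id;
	cpu_nb[cpu]->refcnt++;
}

/* CPU_DEAD or CPU_UP_CANCELED: drop the reference. */
static void cpu_dead(int cpu)
{
	struct nb *nb = cpu_nb[cpu];

	if (!nb)
		return;
	if (nb->nb_id == -1 || --nb->refcnt == 0)
		free(nb);
	cpu_nb[cpu] = NULL;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_prepare(cpu) == 0)
			cpu_starting(cpu, cpu / 2);	/* two CPUs per node */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_nb[cpu])
			printf("cpu%d -> node %d (refcnt %d)\n",
			       cpu, cpu_nb[cpu]->nb_id, cpu_nb[cpu]->refcnt);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_dead(cpu);

	return 0;
}

This is also why, in the diff that follows, the notifier forwards the cpu_prepare() return value and the new CPU_UP_CANCELED case shares the cpu_dead handler: a failed prepare aborts the bring-up, and the cancel path then frees the provisional structure.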
@@ -158,7 +158,7 @@ struct x86_pmu {
 			       struct perf_event *event);
 	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
@@ -1333,11 +1333,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1350,6 +1351,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1361,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
......
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;
 
 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;
 
 	/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
 
-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	raw_spin_lock(&amd_nb_lock);
 
 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}
 
-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
 	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;
 
@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_lock(&amd_nb_lock);
 
 	if (cpuhw->amd_nb) {
-		if (--cpuhw->amd_nb->refcnt == 0)
-			kfree(cpuhw->amd_nb);
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);
 
 		cpuhw->amd_nb = NULL;
 	}
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,
 
-	.cpu_prepare		= amd_pmu_cpu_online,
-	.cpu_dead		= amd_pmu_cpu_offline,
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
 };
 
 static __init int amd_pmu_init(void)
......