Commit 1a6e21f7 authored by Peter Zijlstra, committed by Ingo Molnar

perf_events, x86: Clean up hw_perf_*_all() implementation

Put the recursion avoidance code in the generic hook instead of
replicating it in each implementation.
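
For illustration, a minimal user-space sketch of the pattern (hypothetical
names: pmu_enabled and vendor_disable_all() stand in for cpuc->enabled and
x86_pmu.disable_all(); hw_disable() for the generic hw_perf_disable() hook):

	#include <stdio.h>

	/* compiler barrier, equivalent to the kernel's barrier() macro */
	#define barrier() __asm__ __volatile__("" ::: "memory")

	static int pmu_enabled = 1;	/* plays the role of cpuc->enabled */

	static void vendor_disable_all(void)
	{
		/* a real implementation would clear the vendor's control MSRs */
		printf("vendor disable\n");
	}

	/* generic hook: the state check and the barrier live here, once,
	 * instead of being repeated in every vendor callback */
	static void hw_disable(void)
	{
		if (!pmu_enabled)
			return;

		pmu_enabled = 0;
		barrier();	/* order the flag write before the hardware writes */

		vendor_disable_all();
	}

	int main(void)
	{
		hw_disable();	/* disables once */
		hw_disable();	/* repeated call returns early in the generic hook */
		return 0;
	}

The enable path in the patch below is symmetric: hw_perf_enable() sets
cpuc->enabled, issues the barrier, and only then calls x86_pmu.enable_all().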
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221122.057507285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent ed8777fc
@@ -1099,15 +1099,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 static void p6_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
 
-	if (!cpuc->enabled)
-		return;
-
-	cpuc->enabled = 0;
-	barrier();
-
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
 	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1118,12 +1111,6 @@ static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	if (!cpuc->enabled)
-		return;
-
-	cpuc->enabled = 0;
-	barrier();
-
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
@@ -1135,17 +1122,6 @@ static void amd_pmu_disable_all(void)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	if (!cpuc->enabled)
-		return;
-
-	cpuc->enabled = 0;
-	/*
-	 * ensure we write the disable before we start disabling the
-	 * events proper, so that amd_pmu_enable_event() does the
-	 * right thing.
-	 */
-	barrier();
-
 	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		u64 val;
 
@@ -1166,23 +1142,20 @@ void hw_perf_disable(void)
 	if (!x86_pmu_initialized())
 		return;
 
-	if (cpuc->enabled)
-		cpuc->n_added = 0;
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->n_added = 0;
+	cpuc->enabled = 0;
+	barrier();
 
 	x86_pmu.disable_all();
 }
 
 static void p6_pmu_enable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	unsigned long val;
 
-	if (cpuc->enabled)
-		return;
-
-	cpuc->enabled = 1;
-	barrier();
-
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
 	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1193,12 +1166,6 @@ static void intel_pmu_enable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	if (cpuc->enabled)
-		return;
-
-	cpuc->enabled = 1;
-	barrier();
-
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -1217,12 +1184,6 @@ static void amd_pmu_enable_all(void)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	if (cpuc->enabled)
-		return;
-
-	cpuc->enabled = 1;
-	barrier();
-
 	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		struct perf_event *event = cpuc->events[idx];
 		u64 val;
@@ -1417,6 +1378,10 @@ void hw_perf_enable(void)
 	if (!x86_pmu_initialized())
 		return;
 
+	if (cpuc->enabled)
+		return;
+
 	if (cpuc->n_added) {
 		/*
 		 * apply assignment obtained either from
@@ -1461,6 +1426,10 @@ void hw_perf_enable(void)
 		cpuc->n_added = 0;
 		perf_events_lapic_init();
 	}
+
+	cpuc->enabled = 1;
+	barrier();
+
 	x86_pmu.enable_all();
 }