Commit 72eae04d authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: modify initialization of struct x86_pmu

This patch adds an error handler and changes initialization of struct
x86_pmu. No functional changes. Needed for follow-on patches.

[ Impact: cleanup ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-14-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 55de0f2e
@@ -913,7 +913,7 @@ static struct x86_pmu amd_pmu = {
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
 };
 
-static struct x86_pmu *intel_pmu_init(void)
+static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
@@ -921,7 +921,7 @@ static struct x86_pmu *intel_pmu_init(void)
 	unsigned int ebx;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-		return NULL;
+		return -ENODEV;
 
 	/*
 	 * Check whether the Architectural PerfMon supports
@@ -929,49 +929,54 @@ static struct x86_pmu *intel_pmu_init(void)
 	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
-		return NULL;
+		return -ENODEV;
 
 	intel_perfmon_version = eax.split.version_id;
 	if (intel_perfmon_version < 2)
-		return NULL;
+		return -ENODEV;
 
 	pr_info("Intel Performance Monitoring support detected.\n");
 	pr_info("... version: %d\n", intel_perfmon_version);
 	pr_info("... bit width: %d\n", eax.split.bit_width);
 	pr_info("... mask length: %d\n", eax.split.mask_length);
 
+	x86_pmu = &intel_pmu;
+
 	nr_counters_generic = eax.split.num_counters;
 	nr_counters_fixed = edx.split.num_counters_fixed;
 	counter_value_mask = (1ULL << eax.split.bit_width) - 1;
 
-	return &intel_pmu;
+	return 0;
 }
 
-static struct x86_pmu *amd_pmu_init(void)
+static int amd_pmu_init(void)
 {
+	x86_pmu = &amd_pmu;
+
 	nr_counters_generic = 4;
 	nr_counters_fixed = 0;
 	counter_value_mask = 0x0000FFFFFFFFFFFFULL;
 	counter_value_bits = 48;
 
 	pr_info("AMD Performance Monitoring support detected.\n");
-
-	return &amd_pmu;
+	return 0;
 }
 
 void __init init_hw_perf_counters(void)
 {
+	int err;
+
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
-		x86_pmu = intel_pmu_init();
+		err = intel_pmu_init();
 		break;
 	case X86_VENDOR_AMD:
-		x86_pmu = amd_pmu_init();
+		err = amd_pmu_init();
 		break;
 	default:
 		return;
 	}
-	if (!x86_pmu)
+	if (err != 0)
 		return;
 
 	pr_info("... num counters: %d\n", nr_counters_generic);
...
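In short, the new scheme has each vendor-specific init routine install the global x86_pmu pointer itself and report success or failure as an integer (0 or -ENODEV), so init_hw_perf_counters() only has to check a single error code. Below is a minimal userspace sketch of that pattern for illustration; the names (pmu_ops, intel_init, amd_init, pmu) are made-up stand-ins, not the real kernel symbols:

#include <stdio.h>

#define ENODEV 19  /* stand-in for the kernel's errno value */

/* Simplified stand-in for struct x86_pmu and the global pointer it fills. */
struct pmu_ops { const char *name; };

static struct pmu_ops intel_ops = { "intel" };
static struct pmu_ops amd_ops   = { "amd" };
static struct pmu_ops *pmu;      /* plays the role of the global x86_pmu */

/* New style: the init routine installs the ops pointer and returns 0 or -ENODEV. */
static int intel_init(int has_arch_perfmon)
{
	if (!has_arch_perfmon)
		return -ENODEV;  /* nothing installed; caller only sees the error code */
	pmu = &intel_ops;
	return 0;
}

static int amd_init(void)
{
	pmu = &amd_ops;
	return 0;
}

int main(void)
{
	int err;
	int vendor_is_intel = 1;     /* pretend CPUID reported an Intel CPU */

	if (vendor_is_intel)         /* mirrors the vendor switch in init_hw_perf_counters() */
		err = intel_init(1);
	else
		err = amd_init();

	if (err != 0)                /* same single bail-out check as the patched caller */
		return 0;

	printf("using %s pmu\n", pmu->name);
	return 0;
}

The effect of the restructuring is that the caller no longer needs to know which struct was selected; it only checks whether vendor setup succeeded, which keeps the common path uniform for the follow-on patches the commit message mentions.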