提交 b8b3319a 编写于 作者: T Thomas Gleixner 提交者: Ingo Molnar

perf/x86/intel/rapl: Sanitize the quirk handling

There is no point in having a quirk machinery for a single possible
function. Get rid of it and move the quirk to a place where it actually
makes sense.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221012.311639465@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 55f2890f
...@@ -133,7 +133,6 @@ static int rapl_cntr_mask; ...@@ -133,7 +133,6 @@ static int rapl_cntr_mask;
static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu); static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);
static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free); static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free);
static struct x86_pmu_quirk *rapl_quirks;
static inline u64 rapl_read_counter(struct perf_event *event) static inline u64 rapl_read_counter(struct perf_event *event)
{ {
u64 raw; u64 raw;
...@@ -141,15 +140,6 @@ static inline u64 rapl_read_counter(struct perf_event *event) ...@@ -141,15 +140,6 @@ static inline u64 rapl_read_counter(struct perf_event *event)
return raw; return raw;
} }
/*
 * Register a cpu-model quirk: allocate a static x86_pmu_quirk node for
 * func_ and push it onto the front of the rapl_quirks singly-linked
 * list (walked once at init time). __initdata: the node is only needed
 * during driver initialization.
 */
#define rapl_add_quirk(func_) \
do { \
	static struct x86_pmu_quirk __quirk __initdata = { \
		.func = func_, \
	}; \
	__quirk.next = rapl_quirks; \
	rapl_quirks = &__quirk; \
} while (0)
static inline u64 rapl_scale(u64 v, int cfg) static inline u64 rapl_scale(u64 v, int cfg)
{ {
if (cfg > NR_RAPL_DOMAINS) { if (cfg > NR_RAPL_DOMAINS) {
...@@ -564,17 +554,6 @@ static void rapl_cpu_init(int cpu) ...@@ -564,17 +554,6 @@ static void rapl_cpu_init(int cpu)
cpumask_set_cpu(cpu, &rapl_cpu_mask); cpumask_set_cpu(cpu, &rapl_cpu_mask);
} }
/*
 * Haswell-Server quirk: override the DRAM domain energy unit with the
 * fixed value (2^-16 J) documented in the datasheet cited below, instead
 * of the generic unit read from the power unit MSR.
 */
static __init void rapl_hsw_server_quirk(void)
{
	/*
	 * DRAM domain on HSW server has fixed energy unit which can be
	 * different than the unit from power unit MSR.
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
	 */
	rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
}
static int rapl_cpu_prepare(int cpu) static int rapl_cpu_prepare(int cpu)
{ {
struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
...@@ -672,7 +651,18 @@ static int rapl_cpu_notifier(struct notifier_block *self, ...@@ -672,7 +651,18 @@ static int rapl_cpu_notifier(struct notifier_block *self,
return NOTIFY_OK; return NOTIFY_OK;
} }
static int rapl_check_hw_unit(void) static __init void rapl_hsw_server_quirk(void)
{
/*
* DRAM domain on HSW server has fixed energy unit which can be
* different than the unit from power unit MSR.
* "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
* of 2. Datasheet, September 2014, Reference Number: 330784-001 "
*/
rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
}
static int rapl_check_hw_unit(void (*quirk)(void))
{ {
u64 msr_rapl_power_unit_bits; u64 msr_rapl_power_unit_bits;
int i; int i;
...@@ -683,6 +673,9 @@ static int rapl_check_hw_unit(void) ...@@ -683,6 +673,9 @@ static int rapl_check_hw_unit(void)
for (i = 0; i < NR_RAPL_DOMAINS; i++) for (i = 0; i < NR_RAPL_DOMAINS; i++)
rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL; rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
/* Apply cpu model quirk */
if (quirk)
quirk();
return 0; return 0;
} }
...@@ -701,9 +694,9 @@ static const struct x86_cpu_id rapl_cpu_match[] = { ...@@ -701,9 +694,9 @@ static const struct x86_cpu_id rapl_cpu_match[] = {
static int __init rapl_pmu_init(void) static int __init rapl_pmu_init(void)
{ {
void (*quirk)(void) = NULL;
struct rapl_pmu *pmu; struct rapl_pmu *pmu;
int cpu, ret; int cpu, ret;
struct x86_pmu_quirk *quirk;
int i; int i;
/* /*
...@@ -720,7 +713,7 @@ static int __init rapl_pmu_init(void) ...@@ -720,7 +713,7 @@ static int __init rapl_pmu_init(void)
rapl_pmu_events_group.attrs = rapl_events_cln_attr; rapl_pmu_events_group.attrs = rapl_events_cln_attr;
break; break;
case 63: /* Haswell-Server */ case 63: /* Haswell-Server */
rapl_add_quirk(rapl_hsw_server_quirk); quirk = rapl_hsw_server_quirk;
rapl_cntr_mask = RAPL_IDX_SRV; rapl_cntr_mask = RAPL_IDX_SRV;
rapl_pmu_events_group.attrs = rapl_events_srv_attr; rapl_pmu_events_group.attrs = rapl_events_srv_attr;
break; break;
...@@ -736,7 +729,7 @@ static int __init rapl_pmu_init(void) ...@@ -736,7 +729,7 @@ static int __init rapl_pmu_init(void)
rapl_pmu_events_group.attrs = rapl_events_srv_attr; rapl_pmu_events_group.attrs = rapl_events_srv_attr;
break; break;
case 87: /* Knights Landing */ case 87: /* Knights Landing */
rapl_add_quirk(rapl_hsw_server_quirk); quirk = rapl_hsw_server_quirk;
rapl_cntr_mask = RAPL_IDX_KNL; rapl_cntr_mask = RAPL_IDX_KNL;
rapl_pmu_events_group.attrs = rapl_events_knl_attr; rapl_pmu_events_group.attrs = rapl_events_knl_attr;
break; break;
...@@ -745,14 +738,10 @@ static int __init rapl_pmu_init(void) ...@@ -745,14 +738,10 @@ static int __init rapl_pmu_init(void)
return -ENODEV; return -ENODEV;
} }
ret = rapl_check_hw_unit(); ret = rapl_check_hw_unit(quirk);
if (ret) if (ret)
return ret; return ret;
/* run cpu model quirks */
for (quirk = rapl_quirks; quirk; quirk = quirk->next)
quirk->func();
cpu_notifier_register_begin(); cpu_notifier_register_begin();
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册