Commit 34ae7f35 authored by Mark Langsdorf, committed by Dave Jones

[CPUFREQ][2/2] preregister support for powernow-k8

This patch provides support for the _PSD ACPI object in the Powernow-k8
driver.  Although it looks like an invasive patch, most of it is
simply the consequence of turning the static acpi_performance_data
structure into a pointer.

AMD has tested it on several machines over the past few days without issue.

[trivial checkpatch warnings fixed up by davej]
[X86_POWERNOW_K8_ACPI=n buildfix from Randy Dunlap]
Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
Tested-by: Frank Arnold <frank.arnold@amd.com>
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Parent 23431b49
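In outline, the change works as follows: at driver load time a per-CPU acpi_processor_performance area is allocated and handed to acpi_processor_preregister_performance(), which evaluates _PSD and records the P-state coordination information (shared_type / shared_cpu_map) for every processor; each CPU's powernow_k8_data then points its acpi_data member at its per-CPU slot before registering its performance states, and the recorded coordination data is later used to seed the policy's CPU affinity. The condensed sketch below is drawn from the diff that follows, with error reporting and the affinity selection trimmed:

static struct acpi_processor_performance *acpi_perf_data;
static int preregister_valid;

/* One-time setup, called from powernowk8_init(): allocate per-CPU storage
 * for the ACPI performance data and let the ACPI core evaluate _PSD for
 * all processors up front. */
static int powernow_k8_cpu_preinit_acpi(void)
{
	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data)
		return -ENODEV;
	if (acpi_processor_preregister_performance(acpi_perf_data))
		return -ENODEV;
	preregister_valid = 1;
	return 0;
}

/* Per-CPU init: acpi_data is now a pointer into the per-CPU area rather
 * than a structure embedded in powernow_k8_data. */
data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
if (acpi_processor_register_performance(data->acpi_data, data->cpu))
	return -EIO;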
powernow-k8.c:

@@ -737,44 +737,63 @@ static int find_psb_table(struct powernow_k8_data *data)
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
 		return;
 
-	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
-	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
-	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
-	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
-	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
+	data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
+	data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
+	data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+	data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+	data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
+	data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
+}
+
+static struct acpi_processor_performance *acpi_perf_data;
+static int preregister_valid;
+
+static int powernow_k8_cpu_preinit_acpi(void)
+{
+	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+	if (!acpi_perf_data)
+		return -ENODEV;
+
+	if (acpi_processor_preregister_performance(acpi_perf_data))
+		return -ENODEV;
+	else
+		preregister_valid = 1;
+
+	return 0;
 }
 
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
 	int ret_val;
+	int cpu = 0;
 
-	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
+	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+	if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
 		return -EIO;
 	}
 
 	/* verify the data contained in the ACPI structures */
-	if (data->acpi_data.state_count <= 1) {
+	if (data->acpi_data->state_count <= 1) {
 		dprintk("No ACPI P-States\n");
 		goto err_out;
 	}
 
-	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-		(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+		(data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			data->acpi_data.control_register.space_id,
-			data->acpi_data.status_register.space_id);
+			data->acpi_data->control_register.space_id,
+			data->acpi_data->status_register.space_id);
 		goto err_out;
 	}
 
 	/* fill in data->powernow_table */
 	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
-		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
+		* (data->acpi_data->state_count + 1)), GFP_KERNEL);
 	if (!powernow_table) {
 		dprintk("powernow_table memory alloc failure\n");
 		goto err_out;
@@ -787,12 +806,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	if (ret_val)
 		goto err_out_mem;
 
-	powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
-	powernow_table[data->acpi_data.state_count].index = 0;
+	powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
+	powernow_table[data->acpi_data->state_count].index = 0;
 	data->powernow_table = powernow_table;
 
 	/* fill in data */
-	data->numps = data->acpi_data.state_count;
+	data->numps = data->acpi_data->state_count;
 	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
@@ -800,16 +819,31 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
+	/* determine affinity, from ACPI if available */
+	if (preregister_valid) {
+		if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
+		    (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
+			data->starting_core_affinity = data->acpi_data->shared_cpu_map;
+		else
+			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
+	} else {
+		/* best guess from family if not */
+		if (cpu_family == CPU_HW_PSTATE)
+			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
+		else
+			data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu);
+	}
+
 	return 0;
 
 err_out_mem:
 	kfree(powernow_table);
 
 err_out:
-	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(data->acpi_data, data->cpu);
 
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
-	data->acpi_data.state_count = 0;
+	data->acpi_data->state_count = 0;
 
 	return -ENODEV;
 }
@@ -821,10 +855,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 	rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
 	data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
-	for (i = 0; i < data->acpi_data.state_count; i++) {
+	for (i = 0; i < data->acpi_data->state_count; i++) {
 		u32 index;
 
-		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
+		index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
 		if (index > data->max_hw_pstate) {
 			printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
@@ -840,7 +874,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 		powernow_table[i].index = index;
-		powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+		powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -849,16 +883,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 {
 	int i;
 	int cntlofreq = 0;
 
-	for (i = 0; i < data->acpi_data.state_count; i++) {
+	for (i = 0; i < data->acpi_data->state_count; i++) {
 		u32 fid;
 		u32 vid;
 		if (data->exttype) {
-			fid = data->acpi_data.states[i].status & EXT_FID_MASK;
-			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+			fid = data->acpi_data->states[i].status & EXT_FID_MASK;
+			vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
 		} else {
-			fid = data->acpi_data.states[i].control & FID_MASK;
-			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
+			fid = data->acpi_data->states[i].control & FID_MASK;
+			vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
 		}
 
 		dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
@@ -899,10 +933,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 			cntlofreq = i;
 		}
 
-		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+		if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
 				powernow_table[i].frequency,
-				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+				(unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
 			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
 		}
@@ -912,11 +946,12 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
-	if (data->acpi_data.state_count)
-		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	if (data->acpi_data->state_count)
+		acpi_processor_unregister_performance(data->acpi_data, data->cpu);
 }
 
 #else
+static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
@@ -1101,7 +1136,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
-	cpumask_t oldmask;
+	cpumask_t oldmask = CPU_MASK_ALL;
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1174,10 +1209,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* run on any CPU again */
 	set_cpus_allowed_ptr(current, &oldmask);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
-	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
+	pol->cpus = data->starting_core_affinity;
 	data->available_cores = &(pol->cpus);
 
 	/* Take a crude guess here.
@@ -1300,6 +1332,7 @@ static int __cpuinit powernowk8_init(void)
 	}
 
 	if (supported_cpus == num_online_cpus()) {
+		powernow_k8_cpu_preinit_acpi();
 		printk(KERN_INFO PFX "Found %d %s "
 			"processors (%d cpu cores) (" VERSION ")\n",
 			num_online_nodes(),
@@ -1316,6 +1349,10 @@ static void __exit powernowk8_exit(void)
 	dprintk("exit\n");
 
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
+
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
+	free_percpu(acpi_perf_data);
+#endif
 }
 
 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");

powernow-k8.h:

@@ -33,12 +33,13 @@ struct powernow_k8_data {
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 	/* the acpi table needs to be kept. it's only available if ACPI was
 	 * used to determine valid frequency/vid/fid states */
-	struct acpi_processor_performance acpi_data;
+	struct acpi_processor_performance *acpi_data;
 #endif
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
 	cpumask_t *available_cores;
+	cpumask_t starting_core_affinity;
 };