/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
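/*
 * Boost (Intel Turbo / AMD Core Performance Boost) bookkeeping. On both
 * vendors the relevant MSR bit is a *disable* bit, so "boost enabled"
 * means the bit is clear.
 */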
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

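/*
 * Decode a P-state status MSR value: mask it down to the vendor-specific
 * status field, then look it up in the ACPI-provided table to get the
 * frequency in kHz.
 */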
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

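/*
 * Write the new P-state control value on every CPU in cmd->mask. Since
 * smp_call_function_many() does not run the callback on the calling CPU,
 * do_drv_write() is invoked locally first when this CPU is in the mask.
 */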
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

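/*
 * Only used when acpi_pstate_strict is set: poll the status register for up
 * to ~1 ms (100 x 10 us) until the hardware reports the requested frequency.
 */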
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

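/*
 * Switch P-states: map the requested frequency to a table entry, program it
 * through the MSR or ACPI I/O port, and issue the cpufreq pre-/post-change
 * notifications for every CPU covered by the policy.
 */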
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}

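/*
 * When the current speed cannot be read back (e.g. I/O-port systems), guess
 * it by picking the _PSS entry whose core frequency is closest to the
 * measured cpu_khz.
 */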
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

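/*
 * Per-policy setup: register with the ACPI perflib, pick the access method
 * (MSR vs. I/O port), build the cpufreq frequency table from the _PSS data
 * and work out the transition latency.
 */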
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

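	/*
	 * Allocate one entry per _PSS state plus one extra slot for the
	 * CPUFREQ_TABLE_END terminator.
	 */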
	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
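	/*
	 * Entries whose frequency is not strictly below the previously added
	 * one (i.e. duplicate P-states) are skipped.
	 */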
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};

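/*
 * Global boost (Intel IDA / AMD CPB) support: detect it once at module load,
 * bring all online CPUs to a consistent boost state and track CPU hotplug
 * through boost_nb so late-arriving CPUs inherit the current setting.
 */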
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

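/*
 * Autoload this driver on any x86 CPU that advertises ACPI P-state support
 * or AMD hardware P-states.
 */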
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

MODULE_ALIAS("acpi");