acpi-cpufreq.c 25.8 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * acpi-cpufreq.c - ACPI Processor P-States Driver
L
Linus Torvalds 已提交
3 4 5 6
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
L
Linus Torvalds 已提交
8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
31 32
#include <linux/smp.h>
#include <linux/sched.h>
L
Linus Torvalds 已提交
33
#include <linux/cpufreq.h>
34
#include <linux/compiler.h>
35
#include <linux/dmi.h>
36
#include <linux/slab.h>
L
Linus Torvalds 已提交
37 38

#include <linux/acpi.h>
39 40 41 42
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

L
Linus Torvalds 已提交
43 44
#include <acpi/processor.h>

45
#include <asm/msr.h>
46 47 48
#include <asm/processor.h>
#include <asm/cpufeature.h>

L
Linus Torvalds 已提交
49 50 51 52
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

53 54
#define PFX "acpi-cpufreq: "

55 56 57
enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
58
	SYSTEM_AMD_MSR_CAPABLE,
59 60 61 62
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
63
#define AMD_MSR_RANGE		(0x7)
64

65 66
#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

67
struct acpi_cpufreq_data {
68 69 70 71
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
72
	cpumask_var_t freqdomain_cpus;
L
Linus Torvalds 已提交
73 74
};

75
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
76

77
/* acpi_perf_data is a pointer to percpu data. */
78
static struct acpi_processor_performance __percpu *acpi_perf_data;
L
Linus Torvalds 已提交
79 80 81

static struct cpufreq_driver acpi_cpufreq_driver;

82
static unsigned int acpi_pstate_strict;
83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

136
static ssize_t _store_boost(const char *buf, size_t count)
137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

163 164 165 166 167 168
static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

169 170 171 172 173 174 175 176 177
static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);
178

179 180 181 182 183 184 185 186 187
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

188 189 190 191 192 193 194 195 196 197 198 199
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

200
cpufreq_freq_attr_rw(cpb);
201 202
#endif

203 204
static int check_est_cpu(unsigned int cpuid)
{
205
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
206

207
	return cpu_has(cpu, X86_FEATURE_EST);
208 209
}

210 211 212 213 214 215 216
static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

217
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
218
{
219 220
	struct acpi_processor_performance *perf;
	int i;
221 222 223

	perf = data->acpi_data;

224
	for (i = 0; i < perf->state_count; i++) {
225 226 227 228 229 230
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

231 232 233
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
234
	struct acpi_processor_performance *perf;
235

236 237 238 239 240
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

241 242
	perf = data->acpi_data;

243
	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
244
		if (msr == perf->states[data->freq_table[i].driver_data].status)
245 246 247 248 249 250 251 252
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
253
	case SYSTEM_INTEL_MSR_CAPABLE:
254
	case SYSTEM_AMD_MSR_CAPABLE:
255
		return extract_msr(val, data);
256
	case SYSTEM_IO_CAPABLE:
257
		return extract_io(val, data);
258
	default:
259 260 261 262 263 264 265 266
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

267 268 269 270 271 272
struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
273
	unsigned int type;
274
	const struct cpumask *mask;
275 276 277 278
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
279 280 281
	u32 val;
};

282 283
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
L
Linus Torvalds 已提交
284
{
285
	struct drv_cmd *cmd = _cmd;
286 287 288
	u32 h;

	switch (cmd->type) {
289
	case SYSTEM_INTEL_MSR_CAPABLE:
290
	case SYSTEM_AMD_MSR_CAPABLE:
291 292
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
293
	case SYSTEM_IO_CAPABLE:
294 295 296
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
297
		break;
298
	default:
299 300
		break;
	}
301
}
L
Linus Torvalds 已提交
302

303 304
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
305
{
306
	struct drv_cmd *cmd = _cmd;
307
	u32 lo, hi;
308 309

	switch (cmd->type) {
310
	case SYSTEM_INTEL_MSR_CAPABLE:
311 312 313
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
314
		break;
315 316 317
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
318
	case SYSTEM_IO_CAPABLE:
319 320 321
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
322
		break;
323
	default:
324 325
		break;
	}
326
}
L
Linus Torvalds 已提交
327

328
static void drv_read(struct drv_cmd *cmd)
329
{
330
	int err;
331 332
	cmd->val = 0;

333 334
	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
335 336 337 338
}

static void drv_write(struct drv_cmd *cmd)
{
339 340 341 342 343
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
344
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
345
	put_cpu();
346
}
L
Linus Torvalds 已提交
347

348
static u32 get_cur_val(const struct cpumask *mask)
349
{
350 351
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
L
Linus Torvalds 已提交
352

353
	if (unlikely(cpumask_empty(mask)))
354
		return 0;
L
Linus Torvalds 已提交
355

356
	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
357 358
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
359
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
360
		break;
361 362
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
363
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
364
		break;
365 366
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
367
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
368 369 370 371 372 373 374
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

375
	cmd.mask = mask;
376
	drv_read(&cmd);
L
Linus Torvalds 已提交
377

378
	pr_debug("get_cur_val = %u\n", cmd.val);
379 380 381

	return cmd.val;
}
L
Linus Torvalds 已提交
382

383 384
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
385
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
386
	unsigned int freq;
387
	unsigned int cached_freq;
388

389
	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
390 391

	if (unlikely(data == NULL ||
392
		     data->acpi_data == NULL || data->freq_table == NULL)) {
393
		return 0;
L
Linus Torvalds 已提交
394 395
	}

396
	cached_freq = data->freq_table[data->acpi_data->state].frequency;
397
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
398 399 400 401 402 403 404 405
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

406
	pr_debug("cur freq = %u\n", freq);
L
Linus Torvalds 已提交
407

408
	return freq;
L
Linus Torvalds 已提交
409 410
}

411
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
412
				struct acpi_cpufreq_data *data)
413
{
414 415
	unsigned int cur_freq;
	unsigned int i;
L
Linus Torvalds 已提交
416

417
	for (i = 0; i < 100; i++) {
418 419 420 421 422 423 424 425 426
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
427
			       unsigned int target_freq, unsigned int relation)
L
Linus Torvalds 已提交
428
{
429
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
430 431 432
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
433 434
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
435
	int result = 0;
436

437
	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
438 439

	if (unlikely(data == NULL ||
440
	     data->acpi_data == NULL || data->freq_table == NULL)) {
441 442
		return -ENODEV;
	}
L
Linus Torvalds 已提交
443

444
	perf = data->acpi_data;
L
Linus Torvalds 已提交
445
	result = cpufreq_frequency_table_target(policy,
446 447 448
						data->freq_table,
						target_freq,
						relation, &next_state);
449 450 451 452
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}
L
Linus Torvalds 已提交
453

454
	next_perf_state = data->freq_table[next_state].driver_data;
455
	if (perf->state == next_perf_state) {
456
		if (unlikely(data->resume)) {
457
			pr_debug("Called after resume, resetting to P%d\n",
458
				next_perf_state);
459 460
			data->resume = 0;
		} else {
461
			pr_debug("Already at target state (P%d)\n",
462
				next_perf_state);
463
			goto out;
464
		}
465 466
	}

467 468 469 470
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
471
		cmd.val = (u32) perf->states[next_perf_state].control;
472
		break;
473 474 475 476 477
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
478 479 480 481 482 483 484
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
485 486
		result = -ENODEV;
		goto out;
487
	}
488

489
	/* cpufreq holds the hotplug lock, so we are safe from here on */
490
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
491
		cmd.mask = policy->cpus;
492
	else
493
		cmd.mask = cpumask_of(policy->cpu);
494

495 496
	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
497
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
L
Linus Torvalds 已提交
498

499
	drv_write(&cmd);
500

501
	if (acpi_pstate_strict) {
502
		if (!check_freqs(cmd.mask, freqs.new, data)) {
503
			pr_debug("acpi_cpufreq_target failed (%d)\n",
504
				policy->cpu);
505
			result = -EAGAIN;
506
			freqs.new = freqs.old;
507 508 509
		}
	}

510
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
511 512 513

	if (!result)
		perf->state = next_perf_state;
514

515
out:
516
	return result;
L
Linus Torvalds 已提交
517 518
}

519
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
L
Linus Torvalds 已提交
520
{
521
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
L
Linus Torvalds 已提交
522

523
	pr_debug("acpi_cpufreq_verify\n");
L
Linus Torvalds 已提交
524

525
	return cpufreq_frequency_table_verify(policy, data->freq_table);
L
Linus Torvalds 已提交
526 527 528
}

static unsigned long
529
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
L
Linus Torvalds 已提交
530
{
531
	struct acpi_processor_performance *perf = data->acpi_data;
532

L
Linus Torvalds 已提交
533 534 535 536
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
537
		unsigned long freqn = perf->states[0].core_frequency * 1000;
L
Linus Torvalds 已提交
538

539
		for (i = 0; i < (perf->state_count-1); i++) {
L
Linus Torvalds 已提交
540
			freq = freqn;
541
			freqn = perf->states[i+1].core_frequency * 1000;
L
Linus Torvalds 已提交
542
			if ((2 * cpu_khz) > (freqn + freq)) {
543
				perf->state = i;
544
				return freq;
L
Linus Torvalds 已提交
545 546
			}
		}
547
		perf->state = perf->state_count-1;
548
		return freqn;
549
	} else {
L
Linus Torvalds 已提交
550
		/* assume CPU is at P0... */
551 552 553
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
L
Linus Torvalds 已提交
554 555
}

556 557 558 559 560 561 562 563 564 565 566
static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604
static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

605 606 607 608 609 610 611 612
/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
613
static int __init acpi_cpufreq_early_init(void)
614
{
615
	unsigned int i;
616
	pr_debug("acpi_cpufreq_early_init\n");
617

618 619
	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
620
		pr_debug("Memory allocation error for acpi_perf_data.\n");
621
		return -ENOMEM;
622
	}
623
	for_each_possible_cpu(i) {
624
		if (!zalloc_cpumask_var_node(
625 626
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {
627 628 629 630 631 632

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}
633 634

	/* Do initialization in ACPI core */
635 636
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
637 638
}

639
#ifdef CONFIG_SMP
640 641 642 643 644 645 646 647
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

648
static int sw_any_bug_found(const struct dmi_system_id *d)
649 650 651 652 653
{
	bios_with_sw_any_bug = 1;
	return 0;
}

654
static const struct dmi_system_id sw_any_bug_dmi_table[] = {
655 656 657 658 659 660 661 662 663 664 665
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
666 667 668

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
669 670
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
671 672
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
673
	 * Both Processor Cores to Lock Up. */
674 675 676
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
677 678 679 680 681
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
682
			return -ENODEV;
683
		    }
684 685 686
		}
	return 0;
}
687
#endif
688

689
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
L
Linus Torvalds 已提交
690
{
691 692 693 694 695
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
696
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
697
	struct acpi_processor_performance *perf;
698 699 700
#ifdef CONFIG_SMP
	static int blacklisted;
#endif
L
Linus Torvalds 已提交
701

702
	pr_debug("acpi_cpufreq_cpu_init\n");
L
Linus Torvalds 已提交
703

704
#ifdef CONFIG_SMP
705 706 707 708 709
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
710 711
#endif

712
	data = kzalloc(sizeof(*data), GFP_KERNEL);
L
Linus Torvalds 已提交
713
	if (!data)
714
		return -ENOMEM;
L
Linus Torvalds 已提交
715

716 717 718 719 720
	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

721
	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
722
	per_cpu(acfreq_data, cpu) = data;
L
Linus Torvalds 已提交
723

724
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
725
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
L
Linus Torvalds 已提交
726

727
	result = acpi_processor_register_performance(data->acpi_data, cpu);
L
Linus Torvalds 已提交
728
	if (result)
729
		goto err_free_mask;
L
Linus Torvalds 已提交
730

731 732
	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;
733

734
	/*
735
	 * Will let policy->cpus know about dependency only when software
736 737 738
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
739
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
740
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
741
	}
742
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
743 744 745

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
746
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
747
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
748
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
749
	}
750 751 752 753

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
754
		cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
755 756 757
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
758
#endif
759

L
Linus Torvalds 已提交
760
	/* capability check */
761
	if (perf->state_count <= 1) {
762
		pr_debug("No P-States\n");
L
Linus Torvalds 已提交
763 764 765
		result = -ENODEV;
		goto err_unreg;
	}
766

767 768 769 770 771 772
	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
773
	case ACPI_ADR_SPACE_SYSTEM_IO:
774 775 776 777 778 779
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
780
		pr_debug("SYSTEM IO addr space\n");
781 782
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
783
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
784
		pr_debug("HARDWARE addr space\n");
785 786 787
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
788
		}
789 790 791 792 793 794
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
795
	default:
796
		pr_debug("Unknown addr space %d\n",
797
			(u32) (perf->control_register.space_id));
L
Linus Torvalds 已提交
798 799 800 801
		result = -ENODEV;
		goto err_unreg;
	}

802
	data->freq_table = kmalloc(sizeof(*data->freq_table) *
803
		    (perf->state_count+1), GFP_KERNEL);
L
Linus Torvalds 已提交
804 805 806 807 808 809 810
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
811
	for (i = 0; i < perf->state_count; i++) {
812 813 814 815
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
L
Linus Torvalds 已提交
816 817
	}

818 819 820 821
	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
822 823
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
824 825
	}

L
Linus Torvalds 已提交
826
	/* table init */
827 828
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
829
		    data->freq_table[valid_states-1].frequency / 1000)
830 831
			continue;

832
		data->freq_table[valid_states].driver_data = i;
833
		data->freq_table[valid_states].frequency =
834
		    perf->states[i].core_frequency * 1000;
835
		valid_states++;
L
Linus Torvalds 已提交
836
	}
837
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
838
	perf->state = 0;
L
Linus Torvalds 已提交
839 840

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
841
	if (result)
L
Linus Torvalds 已提交
842 843
		goto err_freqfree;

844 845 846
	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

847
	switch (perf->control_register.space_id) {
848
	case ACPI_ADR_SPACE_SYSTEM_IO:
849 850 851
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
852
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
853
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
854
		policy->cur = get_cur_freq_on_cpu(cpu);
855
		break;
856
	default:
857 858 859
		break;
	}

L
Linus Torvalds 已提交
860 861 862
	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

863
	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
864
	for (i = 0; i < perf->state_count; i++)
865
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
866
			(i == perf->state ? '*' : ' '), i,
867 868 869
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);
L
Linus Torvalds 已提交
870 871

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
872

873 874 875 876 877
	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;
878

879
	return result;
L
Linus Torvalds 已提交
880

881
err_freqfree:
L
Linus Torvalds 已提交
882
	kfree(data->freq_table);
883
err_unreg:
884
	acpi_processor_unregister_performance(perf, cpu);
885 886
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
887
err_free:
L
Linus Torvalds 已提交
888
	kfree(data);
889
	per_cpu(acfreq_data, cpu) = NULL;
L
Linus Torvalds 已提交
890

891
	return result;
L
Linus Torvalds 已提交
892 893
}

894
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
L
Linus Torvalds 已提交
895
{
896
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
L
Linus Torvalds 已提交
897

898
	pr_debug("acpi_cpufreq_cpu_exit\n");
L
Linus Torvalds 已提交
899 900 901

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
902
		per_cpu(acfreq_data, policy->cpu) = NULL;
903 904
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
905
		free_cpumask_var(data->freqdomain_cpus);
906
		kfree(data->freq_table);
L
Linus Torvalds 已提交
907 908 909
		kfree(data);
	}

910
	return 0;
L
Linus Torvalds 已提交
911 912
}

913
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
L
Linus Torvalds 已提交
914
{
915
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
L
Linus Torvalds 已提交
916

917
	pr_debug("acpi_cpufreq_resume\n");
L
Linus Torvalds 已提交
918 919 920

	data->resume = 1;

921
	return 0;
L
Linus Torvalds 已提交
922 923
}

924
static struct freq_attr *acpi_cpufreq_attr[] = {
L
Linus Torvalds 已提交
925
	&cpufreq_freq_attr_scaling_available_freqs,
926
	&freqdomain_cpus,
927
	NULL,	/* this is a placeholder for cpb, do not remove */
L
Linus Torvalds 已提交
928 929 930 931
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
932 933 934 935 936 937 938 939
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
L
Linus Torvalds 已提交
940 941
};

942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
967
	if (cpufreq_sysfs_create_file(&(global_boost.attr)))
968 969 970 971 972 973 974
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
975
	cpufreq_sysfs_remove_file(&(global_boost.attr));
976 977 978 979 980 981 982 983 984

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

985
static int __init acpi_cpufreq_init(void)
L
Linus Torvalds 已提交
986
{
987 988
	int ret;

989 990 991 992
	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return 0;

993 994 995
	if (acpi_disabled)
		return 0;

996
	pr_debug("acpi_cpufreq_init\n");
L
Linus Torvalds 已提交
997

998 999 1000
	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;
1001

1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

1023 1024
	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
1025
		free_acpi_perf_data();
1026 1027
	else
		acpi_cpufreq_boost_init();
1028 1029

	return ret;
L
Linus Torvalds 已提交
1030 1031
}

1032
static void __exit acpi_cpufreq_exit(void)
L
Linus Torvalds 已提交
1033
{
1034
	pr_debug("acpi_cpufreq_exit\n");
L
Linus Torvalds 已提交
1035

1036 1037
	acpi_cpufreq_boost_exit();

L
Linus Torvalds 已提交
1038 1039
	cpufreq_unregister_driver(&acpi_cpufreq_driver);

1040
	free_acpi_perf_data();
L
Linus Torvalds 已提交
1041 1042
}

1043
module_param(acpi_pstate_strict, uint, 0644);
1044
MODULE_PARM_DESC(acpi_pstate_strict,
1045 1046
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
L
Linus Torvalds 已提交
1047 1048 1049 1050

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

1051 1052 1053 1054 1055 1056 1057
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

1058 1059 1060 1061 1062 1063 1064
static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

L
Linus Torvalds 已提交
1065
MODULE_ALIAS("acpi");