/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
	cpumask_var_t freqdomain_cpus;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

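/*
 * Read the vendor-specific MSR (MISC_ENABLE on Intel, K7_HWCR on AMD) and
 * report whether core boost/turbo is currently enabled on the given CPU.
 */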
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

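/*
 * Set or clear the boost-disable bit for every CPU in @cpumask by
 * read-modify-writing the vendor-specific MSR through the cached 'msrs' array.
 */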
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

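/*
 * Common handler for the global 'boost' and per-policy 'cpb' sysfs writes:
 * parse a 0/1 value and apply it to all online CPUs.
 */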
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

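/*
 * Map a raw status value read back via the ACPI I/O port to an entry in the
 * driver's frequency table.
 */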
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

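/*
 * Map a PERF_STATUS-style MSR value to a frequency-table entry; only the
 * low bits (vendor dependent) identify the P-state.
 */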
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].driver_data].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

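/* Decode a raw P-state value via the MSR or I/O method detected at init. */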
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

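/*
 * Run do_drv_write() on every CPU in the command mask, including the local
 * CPU if it is a member.
 */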
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

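/*
 * Read the raw P-state control value for the first CPU in @mask, using
 * whichever access method (Intel MSR, AMD MSR or I/O port) it was set up with.
 */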
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

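/*
 * ->get() callback used on FIXED_HARDWARE (MSR) systems: read the current
 * frequency and, if it no longer matches the cached state, force a rewrite
 * on the next target call.
 */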
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

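/*
 * Used in acpi_pstate_strict mode: poll the hardware up to 100 times,
 * 10 us apart, until it reports the requested frequency.
 */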
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

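/*
 * ->target() callback: look up the requested frequency, program the new
 * P-state through the MSR or I/O port, and issue the cpufreq pre/post
 * change notifications.
 */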
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			freqs.new = freqs.old;
		}
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	if (!result)
		perf->state = next_perf_state;

out:
	return result;
}

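/*
 * Estimate the current P-state by picking the table entry closest to the
 * measured cpu_khz; used when the current speed cannot be read back.
 */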
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		    }
		}
	return 0;
}
#endif

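/*
 * ->init() callback: register this CPU with the ACPI performance library,
 * pick MSR or I/O port access, build the frequency table and set up the
 * _PSD/shared-policy information.
 */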
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free_mask;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

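/* ->exit() callback: undo acpi_cpufreq_cpu_init() for this policy. */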
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		free_cpumask_var(data->freqdomain_cpus);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

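/* Force a register write on the first target call after resume. */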
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

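/*
 * Detect boost support (CPB on AMD, IDA on Intel), bring all online CPUs to
 * a consistent boost state, register a hotplug notifier to keep new CPUs in
 * sync, and expose the global 'boost' sysfs file.
 */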
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (cpufreq_sysfs_create_file(&(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	cpufreq_sysfs_remove_file(&(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

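/*
 * Module init: bail out if another cpufreq driver is already registered or
 * ACPI is disabled, pre-initialize the ACPI perflib data, optionally hook up
 * the legacy 'cpb' attribute on AMD, then register the driver.
 */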
static int __init acpi_cpufreq_init(void)
{
	int ret;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return 0;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");