/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

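/*
 * Return true if core boosting (Intel Turbo Boost / AMD Core Performance
 * Boost) is currently enabled on @cpu, judged from the vendor's
 * boost-disable MSR bit.
 */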
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

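/*
 * Set or clear the vendor boost-disable MSR bit on all CPUs in @cpumask in
 * one batched rdmsr_on_cpus()/wrmsr_on_cpus() pass (enable == true clears
 * the disable bit).
 */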
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

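/* Common write handler for the global "boost" and per-policy "cpb" files. */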
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

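/* Look up the frequency that matches a status value read from the ACPI I/O port. */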
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

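/* Look up the frequency for a PERF_STATUS MSR value, masking vendor bits first. */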
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

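/* Decode a raw status value via the MSR or I/O helper, as appropriate for this CPU. */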
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

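/* Run do_drv_read() on one CPU of cmd->mask and collect the result in cmd->val. */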
static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

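/* Run do_drv_write() on every CPU in cmd->mask, including the local CPU if present. */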
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

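/* Read the raw P-state status value (MSR or I/O port) from one CPU in @mask. */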
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

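/*
 * cpufreq ->get() callback used when the P-state can be read from MSRs;
 * also flags BIOS-initiated frequency changes so the next target call
 * forces a write.
 */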
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

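/* Poll up to 100 times, 10 us apart, until the hardware reports @freq; 1 on success, 0 on timeout. */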
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

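/*
 * cpufreq ->target() callback: look up the table entry matching @target_freq
 * and program the P-state control MSR or I/O port on the policy's CPUs.
 */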
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}

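/*
 * Guess the current frequency by picking the P-state closest to cpu_khz;
 * used when the current speed cannot be read back from the hardware.
 */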
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

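/* CPU hotplug notifier that keeps the boost-disable MSR bit consistent as CPUs come and go. */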
static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		    }
		}
	return 0;
}
#endif

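/*
 * Per-policy init callback: blacklist-check the CPU, register with the ACPI
 * performance library, choose MSR or I/O port access, and build the cpufreq
 * frequency table from the ACPI P-state data.
 */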
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};

static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

MODULE_ALIAS("acpi");