processor_idle.c 33.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
L
Linus Torvalds 已提交
7 8
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
9 10
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
L
Linus Torvalds 已提交
11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
T
Tim Schmielau 已提交
40
#include <linux/sched.h>	/* need_resched() */
M
Mark Gross 已提交
41
#include <linux/pm_qos_params.h>
42
#include <linux/clockchips.h>
43
#include <linux/cpuidle.h>
44
#include <linux/irqflags.h>
L
Linus Torvalds 已提交
45

46 47 48 49 50 51 52 53 54 55
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

L
Linus Torvalds 已提交
56 57 58 59 60
#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
Z
Zhao Yakui 已提交
61
#include <asm/processor.h>
L
Linus Torvalds 已提交
62 63 64

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
65
ACPI_MODULE_NAME("processor_idle");
L
Linus Torvalds 已提交
66 67
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
68
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
69 70 71
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
L
Linus Torvalds 已提交
72

73 74
/*
 * Deepest C-state the driver will use; defaults to the maximum the driver
 * supports. Not runtime-writable (perms 0000) — set via the
 * "processor.max_cstate=" boot parameter.
 */
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
/* Non-zero disables evaluation of the _CST object (boot parameter only). */
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

/*
 * Multiplier applied when deriving target residency from state latency;
 * runtime-tunable via sysfs (0644).
 */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
L
Linus Torvalds 已提交
80 81 82 83 84 85 86

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
87
/*
 * DMI callback: clamp max_cstate for machines known to crash in deep
 * C-states. The limit is taken from the table entry's driver_data.
 * Returns 0 in all cases (DMI callbacks continue scanning on 0).
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	/*
	 * If the user already lowered (or raised past the default)
	 * max_cstate on the command line, honor their choice.
	 */
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

101 102 103
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
/*
 * DMI blacklist of machines whose BIOSes misbehave in deep C-states.
 * driver_data holds the maximum safe C-state (1 => C1 only, 2 => up to C2).
 * NOTE(review): "1SET60WW" appears twice; harmless (first match wins,
 * same callback/limit) but could be deduplicated.
 */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

L
Len Brown 已提交
162
/*
 * Number of PM timer ticks elapsed between two raw timer reads,
 * compensating for at most one wrap of the counter. The counter is
 * 24 bits wide unless the FADT advertises a 32-bit PM timer.
 */
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return t2 - t1;

	/* Counter wrapped between the two reads. */
	if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)
		return (0xFFFFFFFF - t1) + t2;

	return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
}

172 173 174 175 176 177 178 179 180 181
/*
 * Same wrap handling as ticks_elapsed(), but the result is converted
 * from PM timer ticks to microseconds.
 */
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	u32 delta;

	if (t2 >= t1)
		delta = t2 - t1;
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		delta = ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
	else
		delta = (0xFFFFFFFF - t1) + t2;

	return PM_TIMER_TICKS_TO_US(delta);
}

182 183 184 185
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
186 187 188 189 190 191 192 193
/*
 * Halt the CPU until the next interrupt, racelessly with respect to the
 * scheduler. Caller must have interrupts disabled; interrupts are disabled
 * again before returning.
 */
static void acpi_safe_halt(void)
{
	/* Stop advertising this CPU as polling-idle to the scheduler. */
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		/* safe_halt() enables interrupts and halts in one step... */
		safe_halt();
		/* ...so re-disable them to restore the caller's irq state. */
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

201 202 203 204
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
205 206 207
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
208 209 210 211 212
 */
/*
 * Record the shallowest C-state (index) at which the local APIC timer may
 * stop, so timer broadcast can be enabled from that state on. If the
 * platform claims the LAPIC keeps ticking in C2 (local_apic_timer_c2_ok),
 * only C3-type states are considered unsafe; otherwise C2 and deeper.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

/*
 * Tell the clockevents layer whether this CPU ever needs broadcast timer
 * coverage: ON if any usable C-state stops the local APIC timer
 * (timer_broadcast_on_state was lowered from INT_MAX), OFF otherwise.
 */
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long what;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		what = CLOCK_EVT_NOTIFY_BROADCAST_ON;
	else
		what = CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(what, &pr->id);
}

/* Power(C) State timer broadcast control */
/*
 * On entry to (broadcast != 0) or exit from (broadcast == 0) C-state @cx,
 * hand timekeeping to / take it back from the broadcast device, but only
 * if @cx is deep enough to stop the local APIC timer.
 */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;
	unsigned long reason;

	if (state < pr->power.timer_broadcast_on_state)
		return;

	reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER
			   : CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
	clockevents_notify(reason, &pr->id);
}

#else

/* LAPIC timer is not affected by C-states on this config: no-op stubs. */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}
262 263 264

#endif

265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
/*
 * Suspend / resume control
 */
/*
 * While acpi_idle_suspend is set the idle entry paths avoid touching ACPI
 * I/O ports and fall back to plain HALT (see acpi_idle_enter_c1()).
 */
static int acpi_idle_suspend;

/* ACPI driver suspend hook: flag that idle must not use ACPI C-states. */
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

/* ACPI driver resume hook: re-enable full ACPI idle handling. */
int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}

P
Pavel Machek 已提交
282
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
283 284 285 286
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
287
	case X86_VENDOR_INTEL:
288 289 290 291
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
292
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
293
			return 0;
294

295 296 297 298 299 300 301
		/*FALL THROUGH*/
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

L
Len Brown 已提交
302
/*
 * Populate C2/C3 state info from the FADT and the processor's P_BLK
 * (the legacy, pre-_CST discovery method).
 *
 * Returns 0 on success, -EINVAL if @pr is NULL, -ENODEV if there is no
 * P_BLK or (on non-hotplug SMP) the FADT does not allow C2 on MP systems.
 */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

341
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
342
{
343 344 345 346 347
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
348
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
349 350
	}
	/* the C0 state only exists as a filler in our array */
351
	pr->power.states[ACPI_STATE_C0].valid = 1;
352
	return 0;
353 354
}

L
Len Brown 已提交
355
/*
 * Evaluate the _CST AML object and fill pr->power.states[1..N] from it.
 *
 * A _CST package is { count, {buffer(register), type, latency, power}... }.
 * Malformed entries are skipped rather than failing the whole evaluation.
 * Returns 0 on success; -ENODEV if nocst is set or _CST is absent;
 * -EFAULT if the package is malformed or fewer than 2 usable states remain.
 */
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		/* Element 0: the register buffer describing how to enter. */
		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
					(idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		/* C1 needs no latency/hardware validation; accept it now. */
		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		/* Element 2: worst-case entry/exit latency in microseconds. */
		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		/* Element 3: average power consumption in mW. */
		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
526
		return;
L
Linus Torvalds 已提交
527 528 529 530 531 532 533

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
L
Len Brown 已提交
534
				  "latency too large [%d]\n", cx->latency));
535
		return;
L
Linus Torvalds 已提交
536 537 538 539 540 541 542
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expidite policy
	 */
	cx->valid = 1;
543 544

	cx->latency_ticks = cx->latency;
L
Linus Torvalds 已提交
545

546
	return;
L
Linus Torvalds 已提交
547 548
}

L
Len Brown 已提交
549 550
/*
 * Validate a discovered C3 state. Rejects the state when it has no entry
 * address, its latency exceeds ACPI_PROCESSOR_MAX_C3_LATENCY (1000 us),
 * the PIIX4 Type-F DMA erratum applies, or the required bus-master /
 * cache-coherency support is missing. On success marks cx->valid and
 * enables BM_RLD so bus-master activity can wake the system from C3.
 */
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	/*
	 * Cached across calls: bm_check is probed once on the first CPU and
	 * assumed identical on all others (see comment below).
	 */
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported on when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expidite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;
	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

/*
 * Validate every discovered C-state, record from which state timer
 * broadcast becomes necessary, and propagate that to clockevents.
 * Returns the number of usable (valid) states.
 */
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	/* INT_MAX == "no state needs broadcast"; lowered by the checks below. */
	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			/* C1 always works; nothing to verify. */
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

L
Len Brown 已提交
675
/*
 * (Re)build the full C-state table for @pr: try _CST first, fall back to
 * the FADT/P_BLK method, ensure a default C1 exists, then verify all
 * states. Sets pr->flags.power when a C2-or-deeper state is usable.
 * Returns 0 on success or the discovery error code.
 */
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			/* count ends up as the index of the deepest valid state */
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/*
 * seq_file show routine for /proc/acpi/processor/<id>/power: dumps the
 * active state, limits, and per-state type/promotion/demotion/latency/
 * usage statistics. Always returns 0 (silently empty if no processor).
 */
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		/* '*' marks the currently active state. */
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		/* promotion/demotion targets are printed as state indices */
		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

/* Open hook: attach the show routine to this proc entry's processor data. */
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

787
/* seq_file-backed file operations for the per-processor "power" proc file. */
static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

795 796 797 798 799 800 801 802

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

803
	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 * Returns the time spent idle, in microseconds (0 on early exit).
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	/* Bracket the idle period with PM timer reads to measure residency. */
	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
895 896
	int sleep_ticks = 0;

897
	pr = __get_cpu_var(processors);
898 899 900 901

	if (unlikely(!pr))
		return 0;

902 903 904
	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

905 906 907 908 909 910 911 912 913 914 915 916 917 918
	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

919 920 921 922 923 924
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

925 926 927 928
	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
929 930
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
931 932 933
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

P
Pavel Machek 已提交
934
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
935
	/* TSC could halt in idle, so notify users */
936 937
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");;
938
#endif
939 940 941 942
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
943 944 945 946 947 948 949

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
950
	cx->time += sleep_ticks;
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969
	return ticks_elapsed_in_us(t1, t2);
}

/*
 * Count of CPUs currently inside C3, protected by c3_lock; used to decide
 * when to toggle bus-master arbitration (ARB_DIS) in acpi_idle_enter_bm().
 */
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
970 971
	int sleep_ticks = 0;

972
	pr = __get_cpu_var(processors);
973 974 975 976

	if (unlikely(!pr))
		return 0;

977 978 979
	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

980 981
	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
982
			dev->last_state = dev->safe_state;
983 984
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
985
			local_irq_disable();
986
			acpi_safe_halt();
987
			local_irq_enable();
988 989 990 991
			return 0;
		}
	}

992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005
	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

1006 1007
	acpi_unlazy_tlb(smp_processor_id());

1008 1009
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
1010 1011 1012 1013 1014 1015
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
1027 1028 1029 1030 1031 1032
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
1033 1034 1035
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}
1036

1037 1038 1039
	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1040

1041 1042
	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
1043
		spin_lock(&c3_lock);
1044
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
1045 1046 1047 1048
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

P
Pavel Machek 已提交
1049
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
1050
	/* TSC could halt in idle, so notify users */
1051 1052
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
1053
#endif
1054 1055 1056
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1057 1058 1059 1060 1061 1062 1063

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
1064
	cx->time += sleep_ticks;
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078
	return ticks_elapsed_in_us(t1, t2);
}

/* Driver descriptor registered with the cpuidle core by the processor driver */
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
1079
	int i, count = CPUIDLE_DRIVER_STATE_START;
1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

1091
	dev->cpu = pr->id;
1092 1093 1094 1095 1096
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
1109
#endif
1110 1111 1112
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1113
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
1114
		state->exit_latency = cx->latency;
1115
		state->target_residency = cx->latency * latency_factor;
1116 1117 1118 1119 1120 1121
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
			case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
1122 1123 1124
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

1125
			state->enter = acpi_idle_enter_c1;
1126
			dev->safe_state = state;
1127 1128 1129 1130 1131 1132
			break;

			case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
1133
			dev->safe_state = state;
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146
			break;

			case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
1147 1148
		if (count == CPUIDLE_STATE_MAX)
			break;
1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
1161
	int ret = 0;
1162

1163 1164 1165
	if (boot_option_idle_override)
		return 0;

1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
1179 1180 1181 1182
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
1183 1184 1185 1186 1187
	cpuidle_resume_and_unlock();

	return ret;
}

1188
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
L
Len Brown 已提交
1189
			      struct acpi_device *device)
L
Linus Torvalds 已提交
1190
{
L
Len Brown 已提交
1191
	acpi_status status = 0;
1192
	static int first_run;
L
Len Brown 已提交
1193
	struct proc_dir_entry *entry = NULL;
L
Linus Torvalds 已提交
1194 1195
	unsigned int i;

1196 1197
	if (boot_option_idle_override)
		return 0;
L
Linus Torvalds 已提交
1198 1199

	if (!first_run) {
Z
Zhao Yakui 已提交
1200 1201 1202 1203 1204 1205 1206 1207 1208
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
L
Linus Torvalds 已提交
1209
		dmi_check_system(processor_power_dmi_table);
1210
		max_cstate = acpi_processor_cstate_check(max_cstate);
L
Linus Torvalds 已提交
1211
		if (max_cstate < ACPI_C_STATES_MAX)
L
Len Brown 已提交
1212 1213 1214
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
L
Linus Torvalds 已提交
1215 1216 1217
		first_run++;
	}

1218
	if (!pr)
1219
		return -EINVAL;
1220

1221
	if (acpi_gbl_FADT.cst_control && !nocst) {
L
Len Brown 已提交
1222
		status =
1223
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
L
Linus Torvalds 已提交
1224
		if (ACPI_FAILURE(status)) {
1225 1226
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
L
Linus Torvalds 已提交
1227 1228 1229 1230
		}
	}

	acpi_processor_get_power_info(pr);
1231
	pr->flags.power_setup_done = 1;
L
Linus Torvalds 已提交
1232 1233 1234 1235 1236 1237

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that we use previously set idle handler will be used on
	 * platforms that only support C1.
	 */
1238
	if (pr->flags.power) {
1239 1240 1241 1242
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;

L
Linus Torvalds 已提交
1243 1244 1245
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
L
Len Brown 已提交
1246 1247
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
L
Linus Torvalds 已提交
1248 1249 1250 1251
		printk(")\n");
	}

	/* 'power' [R] */
1252 1253 1254 1255
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
L
Linus Torvalds 已提交
1256
	if (!entry)
1257
		return -EIO;
1258
	return 0;
L
Linus Torvalds 已提交
1259 1260
}

L
Len Brown 已提交
1261 1262
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
L
Linus Torvalds 已提交
1263
{
1264 1265 1266
	if (boot_option_idle_override)
		return 0;

1267
	cpuidle_unregister_device(&pr->power.dev);
L
Linus Torvalds 已提交
1268 1269 1270
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
L
Len Brown 已提交
1271 1272
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));
L
Linus Torvalds 已提交
1273

1274
	return 0;
L
Linus Torvalds 已提交
1275
}