/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
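/*
 * The ACPI PM timer ticks at PM_TIMER_FREQUENCY (3.579545 MHz), i.e.
 * roughly 3.58 ticks per microsecond; hence the conversions below.
 */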
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

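/*
 * max_cstate caps the deepest C-state this driver will enter; it can
 * also be overridden at boot time, e.g. "processor.max_cstate=1"
 * restricts idle to C1 (see the DMI blacklist messages below).
 */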
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

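/*
 * Compute the PM timer delta between two reads, accounting for rollover:
 * the timer is only 24 bits wide unless the FADT advertises
 * ACPI_FADT_32BIT_TIMER.
 */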
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

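/*
 * Clear TS_POLLING before halting so that a remote CPU setting
 * NEED_RESCHED sends the wakeup IPI instead of assuming we poll,
 * then re-check need_resched() to close the race.
 */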
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}

#ifndef CONFIG_CPU_IDLE

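/*
 * Make @new the current C-state: reset the promotion/demotion counters
 * and move bus master reload (BM_RLD) tracking when transitioning into
 * or out of C3.
 */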
static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

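/*
 * After all C-states are verified, tell the clockevents layer whether
 * this CPU needs the broadcast device at all: if no valid state stops
 * the local APIC timer, timer_broadcast_on_state is still INT_MAX and
 * broadcast stays off.
 */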
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}

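/*
 * Returns non-zero when the TSC is expected to stop in the given
 * C-state, so callers can mark it unstable as a clocksource.
 */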
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();
		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache  */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		  cx->promotion.state->latency <= system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
		pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;


	if (!pr)
720
		return -EINVAL;
L
	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2  for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
#endif /* !CONFIG_CPU_IDLE */

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

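	/* Assume the lapic timer survives every C-state until a state proves otherwise */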
	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

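	/* Prefer _CST; fall back to FADT/P_BLK data, then ensure C1 is always present */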
	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;


	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	acpi_safe_halt();

	cx->usage++;

	return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");;
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			acpi_safe_halt();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = 0;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
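		/* Heuristic: worth entering only if we expect to stay ~6x the exit latency */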
		state->target_residency = cx->latency * 6;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
			case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

			case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

			case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;


	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

L
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
L
				       pr->power.states[i].type);
L

1761
#ifndef CONFIG_CPU_IDLE
L
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
1766
#endif
L

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
L
L
1773
		return -EIO;
L
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

1780
	return 0;
L

L
			      struct acpi_device *device)
L
1786 1787 1788 1789
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
L

	if (acpi_device_dir(device))
L
				  acpi_device_dir(device));
L
1796 1797
#ifndef CONFIG_CPU_IDLE

L
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle), Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
1808
#ifdef CONFIG_SMP
1809
		unregister_latency_notifier(&acpi_processor_latency_notifier);
1810
#endif
L
1812
#endif
L
1814
	return 0;
L