Commit 35e51fe8 authored by Linus Torvalds

Merge branch 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6

* 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6:
  cpuidle: stop depending on pm_idle
  x86 idle: move mwait_idle_with_hints() to where it is used
  cpuidle: replace xen access to x86 pm_idle and default_idle
  cpuidle: create bootparam "cpuidle.off=1"
  mrst_pmu: driver for Intel Moorestown Power Management Unit
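The common thread in these commits is that cpuidle stops taking over the architectures' pm_idle function pointer; instead, each arch idle loop calls cpuidle_idle_call() directly and falls back to pm_idle() when it returns non-zero. A minimal sketch of that fallback shape, as the hunks below apply it (idle_loop_step() is a hypothetical wrapper name, and pm_idle()/the critical-timings helpers come from the surrounding arch code, not from this sketch):

#include <linux/cpuidle.h>

static void idle_loop_step(void)
{
	stop_critical_timings();
	/*
	 * cpuidle_idle_call() returns 0 when a cpuidle driver handled the
	 * idle entry; a non-zero return means cpuidle is off ("cpuidle.off=1"),
	 * not yet initialized, or has no enabled device, so fall back to the
	 * architecture's pm_idle() handler.
	 */
	if (cpuidle_idle_call())
		pm_idle();
	start_critical_timings();
}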
@@ -551,6 +551,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
/proc/<pid>/coredump_filter.
See also Documentation/filesystems/proc.txt.
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver
Format:
<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
......
@@ -3367,6 +3367,12 @@ F: drivers/net/ixgb/
F: drivers/net/ixgbe/
F: drivers/net/ixgbevf/
INTEL MRST PMU DRIVER
M: Len Brown <len.brown@intel.com>
L: linux-pm@lists.linux-foundation.org
S: Supported
F: arch/x86/platform/mrst/pmu.*
INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
L: linux-wireless@vger.kernel.org
S: Orphan
......
@@ -30,6 +30,7 @@
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <asm/cacheflush.h>
#include <asm/leds.h>
@@ -196,7 +197,8 @@ void cpu_idle(void)
cpu_relax();
} else {
stop_critical_timings();
pm_idle();
if (cpuidle_idle_call())
pm_idle();
start_critical_timings();
/*
* This will eventually be removed - pm_idle
......
@@ -16,12 +16,13 @@
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>
void (*pm_idle)(void) = NULL;
static void (*pm_idle)(void);
static int hlt_counter;
@@ -100,7 +101,8 @@ void cpu_idle(void)
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
pm_idle();
if (cpuidle_idle_call())
pm_idle();
/*
* Sanity check to ensure that pm_idle() returns
* with IRQs enabled
......
@@ -751,8 +751,6 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
:: "a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);
......
@@ -149,6 +149,29 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
/*
* This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
* which can obviate IPI to trigger checking of need_resched.
* We execute MONITOR against need_resched and enter optimized wait state
* through MWAIT. Whenever someone changes need_resched, we would be woken
* up from MWAIT (without an IPI).
*
* New with Core Duo processors, MWAIT can take some hints based on CPU
* capability.
*/
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
if (!need_resched()) {
if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
clflush((void *)&current_thread_info()->flags);
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
__mwait(ax, cx);
}
}
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
......
@@ -438,29 +438,6 @@ void cpu_idle_wait(void)
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
* This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
* which can obviate IPI to trigger checking of need_resched.
* We execute MONITOR against need_resched and enter optimized wait state
* through MWAIT. Whenever someone changes need_resched, we would be woken
* up from MWAIT (without an IPI).
*
* New with Core Duo processors, MWAIT can take some hints based on CPU
* capability.
*/
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
if (!need_resched()) {
if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
clflush((void *)&current_thread_info()->flags);
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
__mwait(ax, cx);
}
}
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
......
@@ -38,6 +38,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -109,7 +110,8 @@ void cpu_idle(void)
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
pm_idle();
if (cpuidle_idle_call())
pm_idle();
start_critical_timings();
}
tick_nohz_restart_sched_tick();
......
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -136,7 +137,8 @@ void cpu_idle(void)
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();
pm_idle();
if (cpuidle_idle_call())
pm_idle();
start_critical_timings();
/* In many cases the interrupt that ended idle
......
obj-$(CONFIG_X86_MRST) += mrst.o
obj-$(CONFIG_X86_MRST) += vrtc.o
obj-$(CONFIG_EARLY_PRINTK_MRST) += early_printk_mrst.o
obj-$(CONFIG_X86_MRST) += pmu.o
This diff is collapsed.
/*
* mrst/pmu.h - private definitions for MRST Power Management Unit mrst/pmu.c
*
* Copyright (c) 2011, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef _MRST_PMU_H_
#define _MRST_PMU_H_
#define PCI_DEV_ID_MRST_PMU 0x0810
#define MRST_PMU_DRV_NAME "mrst_pmu"
#define PCI_SUB_CLASS_MASK 0xFF00
#define PCI_VENDOR_CAP_LOG_ID_MASK 0x7F
#define PCI_VENDOR_CAP_LOG_SS_MASK 0x80
#define SUB_SYS_ALL_D0I1 0x01155555
#define S0I3_WAKE_SOURCES 0x00001FFF
#define PM_S0I3_COMMAND \
((0 << 31) | /* Reserved */ \
(0 << 30) | /* Core must be idle */ \
(0xc2 << 22) | /* ACK C6 trigger */ \
(3 << 19) | /* Trigger on DMI message */ \
(3 << 16) | /* Enter S0i3 */ \
(0 << 13) | /* Numeric mode ID (sw) */ \
(3 << 9) | /* Trigger mode */ \
(0 << 8) | /* Do not interrupt */ \
(1 << 0)) /* Set configuration */
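/*
 * For reference: the field values above OR together to the constant
 * 0x309b0601.
 */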
#define LSS_DMI 0
#define LSS_SD_HC0 1
#define LSS_SD_HC1 2
#define LSS_NAND 3
#define LSS_IMAGING 4
#define LSS_SECURITY 5
#define LSS_DISPLAY 6
#define LSS_USB_HC 7
#define LSS_USB_OTG 8
#define LSS_AUDIO 9
#define LSS_AUDIO_LPE 9
#define LSS_AUDIO_SSP 9
#define LSS_I2C0 10
#define LSS_I2C1 10
#define LSS_I2C2 10
#define LSS_KBD 10
#define LSS_SPI0 10
#define LSS_SPI1 10
#define LSS_SPI2 10
#define LSS_GPIO 10
#define LSS_SRAM 11 /* used by SCU, do not touch */
#define LSS_SD_HC2 12
/* LSS hardware bits 15,14,13 are hardwired to 0, thus unusable */
#define MRST_NUM_LSS 13
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define SSMSK(mask, lss) ((mask) << ((lss) * 2))
#define D0 0
#define D0i1 1
#define D0i2 2
#define D0i3 3
#define S0I3_SSS_TARGET ( \
SSMSK(D0i1, LSS_DMI) | \
SSMSK(D0i3, LSS_SD_HC0) | \
SSMSK(D0i3, LSS_SD_HC1) | \
SSMSK(D0i3, LSS_NAND) | \
SSMSK(D0i3, LSS_SD_HC2) | \
SSMSK(D0i3, LSS_IMAGING) | \
SSMSK(D0i3, LSS_SECURITY) | \
SSMSK(D0i3, LSS_DISPLAY) | \
SSMSK(D0i3, LSS_USB_HC) | \
SSMSK(D0i3, LSS_USB_OTG) | \
SSMSK(D0i3, LSS_AUDIO) | \
SSMSK(D0i1, LSS_I2C0))
/*
* D0i1 on Langwell is Autonomous Clock Gating (ACG).
* Enable ACG on every LSS except camera and audio
*/
#define D0I1_ACG_SSS_TARGET \
(SUB_SYS_ALL_D0I1 & ~SSMSK(D0i1, LSS_IMAGING) & ~SSMSK(D0i1, LSS_AUDIO))
enum cm_mode {
CM_NOP, /* ignore the config mode value */
CM_IMMEDIATE,
CM_DELAY,
CM_TRIGGER,
CM_INVALID
};
enum sys_state {
SYS_STATE_S0I0,
SYS_STATE_S0I1,
SYS_STATE_S0I2,
SYS_STATE_S0I3,
SYS_STATE_S3,
SYS_STATE_S5
};
#define SET_CFG_CMD 1
enum int_status {
INT_SPURIOUS = 0,
INT_CMD_DONE = 1,
INT_CMD_ERR = 2,
INT_WAKE_RX = 3,
INT_SS_ERROR = 4,
INT_S0IX_MISS = 5,
INT_NO_ACKC6 = 6,
INT_INVALID = 7,
};
/* PMU register interface */
static struct mrst_pmu_reg {
u32 pm_sts; /* 0x00 */
u32 pm_cmd; /* 0x04 */
u32 pm_ics; /* 0x08 */
u32 _resv1; /* 0x0C */
u32 pm_wkc[2]; /* 0x10 */
u32 pm_wks[2]; /* 0x18 */
u32 pm_ssc[4]; /* 0x20 */
u32 pm_sss[4]; /* 0x30 */
u32 pm_wssc[4]; /* 0x40 */
u32 pm_c3c4; /* 0x50 */
u32 pm_c5c6; /* 0x54 */
u32 pm_msi_disable; /* 0x58 */
} *pmu_reg;
static inline u32 pmu_read_sts(void) { return readl(&pmu_reg->pm_sts); }
static inline u32 pmu_read_ics(void) { return readl(&pmu_reg->pm_ics); }
static inline u32 pmu_read_wks(void) { return readl(&pmu_reg->pm_wks[0]); }
static inline u32 pmu_read_sss(void) { return readl(&pmu_reg->pm_sss[0]); }
static inline void pmu_write_cmd(u32 arg) { writel(arg, &pmu_reg->pm_cmd); }
static inline void pmu_write_ics(u32 arg) { writel(arg, &pmu_reg->pm_ics); }
static inline void pmu_write_wkc(u32 arg) { writel(arg, &pmu_reg->pm_wkc[0]); }
static inline void pmu_write_ssc(u32 arg) { writel(arg, &pmu_reg->pm_ssc[0]); }
static inline void pmu_write_wssc(u32 arg)
{ writel(arg, &pmu_reg->pm_wssc[0]); }
static inline void pmu_msi_enable(void) { writel(0, &pmu_reg->pm_msi_disable); }
static inline u32 pmu_msi_is_disabled(void)
{ return readl(&pmu_reg->pm_msi_disable); }
union pmu_pm_ics {
struct {
u32 cause:8;
u32 enable:1;
u32 pending:1;
u32 reserved:22;
} bits;
u32 value;
};
static inline void pmu_irq_enable(void)
{
union pmu_pm_ics pmu_ics;
pmu_ics.value = pmu_read_ics();
pmu_ics.bits.enable = 1;
pmu_write_ics(pmu_ics.value);
}
union pmu_pm_status {
struct {
u32 pmu_rev:8;
u32 pmu_busy:1;
u32 mode_id:4;
u32 Reserved:19;
} pmu_status_parts;
u32 pmu_status_value;
};
static inline int pmu_read_busy_status(void)
{
union pmu_pm_status result;
result.pmu_status_value = pmu_read_sts();
return result.pmu_status_parts.pmu_busy;
}
/* pmu set config parameters */
struct cfg_delay_param_t {
u32 cmd:8;
u32 ioc:1;
u32 cfg_mode:4;
u32 mode_id:3;
u32 sys_state:3;
u32 cfg_delay:8;
u32 rsvd:5;
};
struct cfg_trig_param_t {
u32 cmd:8;
u32 ioc:1;
u32 cfg_mode:4;
u32 mode_id:3;
u32 sys_state:3;
u32 cfg_trig_type:3;
u32 cfg_trig_val:8;
u32 cmbi:1;
u32 rsvd1:1;
};
union pmu_pm_set_cfg_cmd_t {
union {
struct cfg_delay_param_t d_param;
struct cfg_trig_param_t t_param;
} pmu2_params;
u32 pmu_pm_set_cfg_cmd_value;
};
#ifdef FUTURE_PATCH
extern int mrst_s0i3_entry(u32 regval, u32 *regaddr);
#else
static inline int mrst_s0i3_entry(u32 regval, u32 *regaddr) { return -1; }
#endif
#endif
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <asm/elf.h>
#include <asm/vdso.h>
@@ -426,7 +427,7 @@ void __init xen_arch_setup(void)
#ifdef CONFIG_X86_32
boot_cpu_data.hlt_works_ok = 1;
#endif
pm_idle = default_idle;
disable_cpuidle();
boot_option_idle_override = IDLE_HALT;
fiddle_vdso();
......
@@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;
int cpuidle_disabled(void)
{
return off;
}
void disable_cpuidle(void)
{
off = 1;
}
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
@@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
* return non-zero on failure
*/
static void cpuidle_idle_call(void)
int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_state *target_state;
int next_state;
if (off)
return -ENODEV;
if (!initialized)
return -ENODEV;
/* check if the device is ready */
if (!dev || !dev->enabled) {
if (pm_idle_old)
pm_idle_old();
else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
default_idle();
#else
local_irq_enable();
#endif
return;
}
if (!dev || !dev->enabled)
return -EBUSY;
#if 0
/* shows regressions, re-enable for 2.6.29 */
@@ -89,7 +97,7 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
local_irq_enable();
return;
return 0;
}
target_state = &dev->states[next_state];
@@ -114,6 +122,8 @@ static void cpuidle_idle_call(void)
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
return 0;
}
/**
@@ -121,10 +131,10 @@ static void cpuidle_idle_call(void)
*/
void cpuidle_install_idle_handler(void)
{
if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
if (enabled_devices) {
/* Make sure all changes finished before we switch to new idle */
smp_wmb();
pm_idle = cpuidle_idle_call;
initialized = 1;
}
}
@@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void)
*/
void cpuidle_uninstall_idle_handler(void)
{
if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
pm_idle = pm_idle_old;
if (enabled_devices) {
initialized = 0;
cpuidle_kick_cpus();
}
}
@@ -427,7 +437,8 @@ static int __init cpuidle_init(void)
{
int ret;
pm_idle_old = pm_idle;
if (cpuidle_disabled())
return -ENODEV;
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
if (ret)
@@ -438,4 +449,5 @@ static int __init cpuidle_init(void)
return 0;
}
module_param(off, int, 0444);
core_initcall(cpuidle_init);
@@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
extern int cpuidle_disabled(void);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
......
@@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (!drv)
return -EINVAL;
if (cpuidle_disabled())
return -ENODEV;
spin_lock(&cpuidle_driver_lock);
if (cpuidle_curr_driver) {
spin_unlock(&cpuidle_driver_lock);
......
@@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
if (!gov || !gov->select)
return -EINVAL;
if (cpuidle_disabled())
return -ENODEV;
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
......
@@ -122,6 +122,8 @@ struct cpuidle_driver {
};
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern int cpuidle_idle_call(void);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
struct cpuidle_driver *cpuidle_get_driver(void);
@@ -135,6 +137,8 @@ extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
#else
static inline void disable_cpuidle(void) { }
static inline int cpuidle_idle_call(void) { return -ENODEV; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
......