Commit d08d528d authored by Linus Torvalds

Merge tag 'pm+acpi-3.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki:

 - Revert of a recent cpuidle change that caused Nehalem machines to
   hang on boot from Alex Shi.

 - USB power management fix addressing a crash in the port device
   object's release routine from Rafael J Wysocki.

 - Device PM QoS fix for a potential deadlock related to sysfs interface
   from Rafael J Wysocki.

 - Fix for a cpufreq crash when the /cpus Device Tree node is missing
   from Paolo Pisati.

 - Fix for a build issue on ia64 related to the Boot Graphics Resource
   Table (BGRT) from Tony Luck.

 - Two fixes for ACPI handles being set incorrectly for device objects
   that don't correspond to any ACPI namespace nodes in the I2C and SPI
   subsystems from Rafael J Wysocki.

 - Fix for compiler warnings related to CONFIG_PM_DEVFREQ being unset
   from Rajagopal Venkat.

 - Fix for a symbol definition typo in cpufreq_governor.h from Borislav
   Petkov.

* tag 'pm+acpi-3.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / BGRT: Don't let users configure BGRT on non X86 systems
  cpuidle / ACPI: recover percpu ACPI processor cstate
  ACPI / I2C: Use parent's ACPI_HANDLE() in acpi_i2c_register_devices()
  cpufreq: Correct header guards typo
  ACPI / SPI: Use parent's ACPI_HANDLE() in acpi_register_spi_devices()
  cpufreq: check OF node /cpus presence before dereferencing it
  PM / devfreq: Fix compiler warnings for CONFIG_PM_DEVFREQ unset
  PM / QoS: Avoid possible deadlock related to sysfs access
  USB / PM: Don't try to hide PM QoS flags from usb_port_device_release()
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD
 
 config ACPI_BGRT
 	bool "Boottime Graphics Resource Table support"
-	depends on EFI
+	depends on EFI && X86
 	help
 	  This driver adds support for exposing the ACPI Boottime Graphics
 	  Resource Table, which allows the operating system to obtain
......
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)
 	acpi_handle handle;
 	acpi_status status;
 
-	handle = ACPI_HANDLE(&adapter->dev);
+	handle = ACPI_HANDLE(adapter->dev.parent);
 	if (!handle)
 		return;
......
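This hunk and the acpi_register_spi_devices() hunk further down apply the same idea: the adapter or master device is created by the bus core and has no ACPI namespace node of its own, so its handle has to be looked up on the parent platform device. That is also why the ACPI_HANDLE_SET() calls disappear from dw_i2c_probe() and pxa2xx_spi_probe() in later hunks. Below is a minimal illustration of the lookup, not the upstream code; the helper name is invented.

#include <linux/acpi.h>
#include <linux/i2c.h>

/*
 * Illustrative only: the I2C core allocates adapter->dev itself, so
 * ACPI_HANDLE(&adapter->dev) is always NULL; the namespace node that
 * describes the controller sits on adapter->dev.parent.
 */
static acpi_handle example_i2c_controller_handle(struct i2c_adapter *adapter)
{
	/* May still be NULL on DT-only or legacy systems. */
	return ACPI_HANDLE(adapter->dev.parent);
}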
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644);
 
 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
 
-static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
+static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
+		      acpi_cstate);
 
 static int disabled_by_idle_boot_param(void)
 {
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	pr = __this_cpu_read(processors);
 
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
  */
 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	ACPI_FLUSH_CPU_CACHE();
 
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	pr = __this_cpu_read(processors);
 
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	pr = __this_cpu_read(processors);
 
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 			continue;
 #endif
-		acpi_cstate[count] = cx;
+		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
......
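The processor_idle.c hunks above turn the single acpi_cstate[] table back into a per-CPU table: the C-state list is built from each processor's own _CST data, so indexing one shared array by idle-state index is only safe if every CPU exposes identical states. A minimal sketch of the DEFINE_PER_CPU array pattern they rely on, using invented example_* names rather than the driver's own:

#include <linux/percpu.h>

#define EXAMPLE_STATE_MAX 8

/* One table per CPU; per_cpu(example_table[i], cpu) names slot i of the
 * copy belonging to "cpu", just as per_cpu(acpi_cstate[index], dev->cpu)
 * does in the hunks above. */
static DEFINE_PER_CPU(void * [EXAMPLE_STATE_MAX], example_table);

static void example_store(int cpu, int index, void *state)
{
	per_cpu(example_table[index], cpu) = state;
}

static void *example_load(int cpu, int index)
{
	return per_cpu(example_table[index], cpu);
}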
@@ -46,6 +46,7 @@
 #include "power.h"
 
 static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
-	mutex_lock(&dev_pm_qos_mtx);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
 
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
+	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_flags(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
+
 	__dev_pm_qos_hide_latency_limit(dev);
 	__dev_pm_qos_hide_flags(dev);
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 
  out:
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
 /**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 	kfree(req);
 }
 
+static void dev_pm_qos_drop_user_request(struct device *dev,
+					 enum dev_pm_qos_req_type type)
+{
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_drop_user_request(dev, type);
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
 /**
  * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
  * @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 		return ret;
 	}
 
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (ret < 0) {
 		__dev_pm_qos_remove_request(req);
 		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
-
 	dev->power.qos->latency_req = req;
+
+	mutex_unlock(&dev_pm_qos_mtx);
+
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
  out:
-	mutex_unlock(&dev_pm_qos_mtx);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
 }
 
 /**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_latency(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_latency_limit(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	}
 
 	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (ret < 0) {
 		__dev_pm_qos_remove_request(req);
 		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
-
 	dev->power.qos->flags_req = req;
+
+	mutex_unlock(&dev_pm_qos_mtx);
+
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
  out:
-	mutex_unlock(&dev_pm_qos_mtx);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
 static void __dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-	}
 }
 
 /**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
 void dev_pm_qos_hide_flags(struct device *dev)
 {
 	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_flags(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_flags(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
......
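Taken together, the qos.c hunks establish one lock-ordering rule: the sysfs attributes for the latency limit and the PM QoS flags are added and removed only under the new dev_pm_qos_sysfs_mtx, never while dev_pm_qos_mtx is held, because removing an attribute can wait for an in-flight show/store handler that itself needs dev_pm_qos_mtx. The sketch below shows that ordering in isolation; every example_* name is invented, and this is a model of the pattern rather than the upstream code.

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>

static DEFINE_MUTEX(example_sysfs_lock);	/* outer: serializes expose/hide */
static DEFINE_MUTEX(example_data_lock);		/* inner: taken by the attribute handlers */

static void example_hide(struct device *dev, const struct attribute_group *grp)
{
	mutex_lock(&example_sysfs_lock);

	/* May block until active readers/writers finish, and those handlers
	 * acquire example_data_lock, so that lock must not be held here. */
	sysfs_remove_group(&dev->kobj, grp);

	mutex_lock(&example_data_lock);
	/* ... drop the request that backed the attribute ... */
	mutex_unlock(&example_data_lock);

	mutex_unlock(&example_sysfs_lock);
}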
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
 {
-	struct device_node *np;
+	struct device_node *np, *parent;
 	int ret;
 
-	for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+	parent = of_find_node_by_path("/cpus");
+	if (!parent) {
+		pr_err("failed to find OF /cpus\n");
+		return -ENOENT;
+	}
+
+	for_each_child_of_node(parent, np) {
 		if (of_get_property(np, "operating-points", NULL))
 			break;
 	}
......
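The cpufreq-cpu0 hunk makes probe fail cleanly when the device tree has no /cpus node at all, instead of handing a NULL parent to for_each_child_of_node(). The standalone sketch below repeats that check with an invented helper name and, for completeness, also drops the parent reference with of_node_put() once the walk is finished:

#include <linux/of.h>
#include <linux/printk.h>

static struct device_node *example_find_opp_cpu_node(void)
{
	struct device_node *parent, *np = NULL;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_err("failed to find OF /cpus\n");
		return NULL;
	}

	/* np keeps a reference when we break out of the loop; it ends up
	 * NULL if no child carries an "operating-points" property. */
	for_each_child_of_node(parent, np) {
		if (of_get_property(np, "operating-points", NULL))
			break;
	}

	of_node_put(parent);
	return np;
}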
@@ -14,8 +14,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef _CPUFREQ_GOVERNER_H
-#define _CPUFREQ_GOVERNER_H
+#ifndef _CPUFREQ_GOVERNOR_H
+#define _CPUFREQ_GOVERNOR_H
 
 #include <linux/cpufreq.h>
 #include <linux/kobject.h>
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		    unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event);
-#endif /* _CPUFREQ_GOVERNER_H */
+#endif /* _CPUFREQ_GOVERNOR_H */
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
 	adap->algo = &i2c_dw_algo;
 	adap->dev.parent = &pdev->dev;
 	adap->dev.of_node = pdev->dev.of_node;
-	ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
 
 	r = i2c_add_numbered_adapter(adap);
 	if (r) {
......
@@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
 
 	master->dev.parent = &pdev->dev;
 	master->dev.of_node = pdev->dev.of_node;
-	ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
 
 	/* the spi->mode bits understood by this driver: */
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
......
@@ -984,7 +984,7 @@ static void acpi_register_spi_devices(struct spi_master *master)
 	acpi_status status;
 	acpi_handle handle;
 
-	handle = ACPI_HANDLE(&master->dev);
+	handle = ACPI_HANDLE(master->dev.parent);
 	if (!handle)
 		return;
......
@@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev)
 {
 	struct usb_port *port_dev = to_usb_port(dev);
 
-	dev_pm_qos_hide_flags(dev);
 	kfree(port_dev);
 }
......
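The USB hunk drops the dev_pm_qos_hide_flags() call from the port's release routine. By the time ->release() runs, the device has already gone through removal, where the driver core tears down its PM QoS data (see the dev_pm_qos_constraints_destroy() hunks above), so calling back into the PM QoS framework from release is at best redundant and, per the summary above, was crashing. All a release callback of this kind has left to do is free the containing object; a minimal sketch with invented example_* names:

#include <linux/device.h>
#include <linux/slab.h>

struct example_port {
	struct device dev;
	int portnum;
};

#define to_example_port(d) container_of(d, struct example_port, dev)

/* Runs after device_del(): no sysfs files or PM QoS data left to undo here. */
static void example_port_release(struct device *dev)
{
	kfree(to_example_port(dev));
}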
@@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data {
 #endif
 
 #else /* !CONFIG_PM_DEVFREQ */
-static struct devfreq *devfreq_add_device(struct device *dev,
+static inline struct devfreq *devfreq_add_device(struct device *dev,
 					  struct devfreq_dev_profile *profile,
 					  const char *governor_name,
 					  void *data)
@@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev,
 	return NULL;
 }
 
-static int devfreq_remove_device(struct devfreq *devfreq)
+static inline int devfreq_remove_device(struct devfreq *devfreq)
 {
 	return 0;
 }
 
-static int devfreq_suspend_device(struct devfreq *devfreq)
+static inline int devfreq_suspend_device(struct devfreq *devfreq)
 {
 	return 0;
 }
 
-static int devfreq_resume_device(struct devfreq *devfreq)
+static inline int devfreq_resume_device(struct devfreq *devfreq)
 {
 	return 0;
 }
 
-static struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct opp *devfreq_recommended_opp(struct device *dev,
 					   unsigned long *freq, u32 flags)
 {
-	return -EINVAL;
+	return ERR_PTR(-EINVAL);
 }
 
-static int devfreq_register_opp_notifier(struct device *dev,
+static inline int devfreq_register_opp_notifier(struct device *dev,
 					 struct devfreq *devfreq)
 {
 	return -EINVAL;
 }
 
-static int devfreq_unregister_opp_notifier(struct device *dev,
+static inline int devfreq_unregister_opp_notifier(struct device *dev,
 					   struct devfreq *devfreq)
 {
 	return -EINVAL;
......
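The devfreq.h hunks fix the stub variants compiled when CONFIG_PM_DEVFREQ is not set. Two details matter: the stubs must be static inline so that every file including the header does not emit (and warn about) an unused static function, and a stub that returns a pointer must report failure with ERR_PTR() rather than a bare negative integer, so callers can keep testing the result with IS_ERR()/PTR_ERR(). A small self-contained sketch of the same pattern around an invented CONFIG_EXAMPLE_FEATURE option:

#include <linux/device.h>
#include <linux/err.h>

struct example_handle;			/* opaque to users of the header */

#ifdef CONFIG_EXAMPLE_FEATURE
struct example_handle *example_acquire(struct device *dev);
#else
/* static inline: no "defined but not used" warning in every includer.
 * ERR_PTR(-EINVAL): keeps the pointer-typed contract intact, so callers
 * still check the result with IS_ERR()/PTR_ERR(). */
static inline struct example_handle *example_acquire(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif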