Commit 080e4168 authored by Linus Torvalds

Merge tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These fix two bugs introduced by recent power management updates (in
  the cpuidle menu governor and intel_pstate) and a few other issues,
  clean up things and remove unused code.

  Specifics:

   - Fix for the cpuidle menu governor, which started taking an
     unnecessary spinlock after one of the recent updates; that did not
     play well with the RT patch (Rafael Wysocki).

   - Fix for the recently added intel_pstate operation mode switching
     feature, which did not reinitialize P-state limits properly when
     switching operation modes (Rafael Wysocki).

   - Removal of unused global notifiers from the PM QoS framework
     (Viresh Kumar).

   - Generic power domains framework update to make it handle
     asynchronous invocations of PM callbacks in the "noirq" phases of
     system suspend/hibernation correctly (Ulf Hansson).

   - Two hibernation core cleanups (Rafael Wysocki).

   - intel_idle cleanup related to the sysfs interface (Len Brown).

   - Off-by-one bug fix in the OPP (Operating Performance Points)
     framework (Andrzej Hajda).

   - OPP framework's documentation fix (Viresh Kumar).

   - cpufreq qoriq driver cleanup (Tang Yuantian).

   - Fixes for typos in comments in the device runtime PM framework
     (Christophe Jaillet)"

* tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / OPP: Documentation: Fix opp-microvolt in examples
  intel_idle: stop exposing platform acronyms in sysfs
  cpufreq: intel_pstate: Fix limits issue with operation mode switching
  PM / hibernate: Define pr_fmt() and use pr_*() instead of printk()
  PM / hibernate: Untangle power_down()
  cpuidle: menu: Avoid taking spinlock for accessing QoS values
  PM / QoS: Remove global notifiers
  PM / runtime: Fix some typos
  cpufreq: qoriq: clean up unused code
  PM / OPP: fix off-by-one bug in dev_pm_opp_get_max_volt_latency loop
  PM / Domains: Power off masters immediately in the power off sequence
  PM / Domains: Rename is_async to one_dev_on for genpd_power_off()
  PM / Domains: Move genpd_power_off() above genpd_power_on()
@@ -188,14 +188,14 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
     opp@1000000000 {
         opp-hz = /bits/ 64 <1000000000>;
-        opp-microvolt = <970000 975000 985000>;
+        opp-microvolt = <975000 970000 985000>;
         opp-microamp = <70000>;
         clock-latency-ns = <300000>;
         opp-suspend;
     };
     opp@1100000000 {
         opp-hz = /bits/ 64 <1100000000>;
-        opp-microvolt = <980000 1000000 1010000>;
+        opp-microvolt = <1000000 980000 1010000>;
         opp-microamp = <80000>;
         clock-latency-ns = <310000>;
     };
@@ -267,14 +267,14 @@ independently.
     opp@1000000000 {
         opp-hz = /bits/ 64 <1000000000>;
-        opp-microvolt = <970000 975000 985000>;
+        opp-microvolt = <975000 970000 985000>;
         opp-microamp = <70000>;
         clock-latency-ns = <300000>;
         opp-suspend;
     };
     opp@1100000000 {
         opp-hz = /bits/ 64 <1100000000>;
-        opp-microvolt = <980000 1000000 1010000>;
+        opp-microvolt = <1000000 980000 1010000>;
         opp-microamp = <80000>;
         clock-latency-ns = <310000>;
     };
@@ -343,14 +343,14 @@ DVFS state together.
     opp@1000000000 {
         opp-hz = /bits/ 64 <1000000000>;
-        opp-microvolt = <970000 975000 985000>;
+        opp-microvolt = <975000 970000 985000>;
         opp-microamp = <70000>;
         clock-latency-ns = <300000>;
         opp-suspend;
     };
     opp@1100000000 {
         opp-hz = /bits/ 64 <1100000000>;
-        opp-microvolt = <980000 1000000 1010000>;
+        opp-microvolt = <1000000 980000 1010000>;
         opp-microamp = <80000>;
         clock-latency-ns = <310000>;
     };
@@ -369,7 +369,7 @@ DVFS state together.
     opp@1300000000 {
         opp-hz = /bits/ 64 <1300000000>;
-        opp-microvolt = <1045000 1050000 1055000>;
+        opp-microvolt = <1050000 1045000 1055000>;
         opp-microamp = <95000>;
         clock-latency-ns = <400000>;
         opp-suspend;
@@ -382,7 +382,7 @@ DVFS state together.
     };
     opp@1500000000 {
         opp-hz = /bits/ 64 <1500000000>;
-        opp-microvolt = <1010000 1100000 1110000>;
+        opp-microvolt = <1100000 1010000 1110000>;
         opp-microamp = <95000>;
         clock-latency-ns = <400000>;
         turbo-mode;
@@ -424,9 +424,9 @@ Example 4: Handling multiple regulators
     opp@1000000000 {
         opp-hz = /bits/ 64 <1000000000>;
-        opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-                        <960000 965000 975000>, /* Supply 1 */
-                        <960000 965000 975000>; /* Supply 2 */
+        opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+                        <965000 960000 975000>, /* Supply 1 */
+                        <965000 960000 975000>; /* Supply 2 */
         opp-microamp = <70000>, /* Supply 0 */
                        <70000>, /* Supply 1 */
                        <70000>; /* Supply 2 */
@@ -437,9 +437,9 @@ Example 4: Handling multiple regulators
     opp@1000000000 {
         opp-hz = /bits/ 64 <1000000000>;
-        opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-                        <960000 965000 975000>, /* Supply 1 */
-                        <960000 965000 975000>; /* Supply 2 */
+        opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+                        <965000 960000 975000>, /* Supply 1 */
+                        <965000 960000 975000>; /* Supply 2 */
         opp-microamp = <70000>, /* Supply 0 */
                        <0>, /* Supply 1 doesn't need this */
                        <70000>; /* Supply 2 */
@@ -474,7 +474,7 @@ Example 5: opp-supported-hw
          */
         opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>
         opp-hz = /bits/ 64 <600000000>;
-        opp-microvolt = <900000 915000 925000>;
+        opp-microvolt = <915000 900000 925000>;
         ...
     };
@@ -487,7 +487,7 @@ Example 5: opp-supported-hw
          */
         opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>
         opp-hz = /bits/ 64 <800000000>;
-        opp-microvolt = <900000 915000 925000>;
+        opp-microvolt = <915000 900000 925000>;
         ...
     };
 };
@@ -512,18 +512,18 @@ Example 6: opp-microvolt-<name>, opp-microamp-<name>:
     opp@1000000000 {
         opp-hz = /bits/ 64 <1000000000>;
-        opp-microvolt-slow = <900000 915000 925000>;
-        opp-microvolt-fast = <970000 975000 985000>;
+        opp-microvolt-slow = <915000 900000 925000>;
+        opp-microvolt-fast = <975000 970000 985000>;
         opp-microamp-slow = <70000>;
         opp-microamp-fast = <71000>;
     };
     opp@1200000000 {
         opp-hz = /bits/ 64 <1200000000>;
-        opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */
-                             <910000 925000 935000>; /* Supply vcc1 */
-        opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */
-                             <960000 965000 975000>; /* Supply vcc1 */
+        opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */
+                             <925000 910000 935000>; /* Supply vcc1 */
+        opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */
+                             <965000 960000 975000>; /* Supply vcc1 */
         opp-microamp = <70000>; /* Will be used for both slow/fast */
     };
 };
......
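All of the swaps above follow the opp-microvolt convention: each entry is a <target min max> triplet (a single-cell form carrying only the target is also allowed), so the minimum voltage belongs in the second cell, not the first. A minimal standalone sketch of that invariant; the helper name is hypothetical, invented for illustration:

    #include <stdbool.h>

    struct opp_microvolt {
        unsigned long target; /* first cell  */
        unsigned long min;    /* second cell */
        unsigned long max;    /* third cell  */
    };

    /* A well-formed <target min max> triplet satisfies min <= target <= max. */
    static bool opp_microvolt_valid(const struct opp_microvolt *v)
    {
        return v->min <= v->target && v->target <= v->max;
    }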
@@ -163,8 +163,7 @@ of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeup
 under the device's power directory.
 
 Notification mechanisms:
-The per-device PM QoS framework has 2 different and distinct notification trees:
-a per-device notification tree and a global notification tree.
+The per-device PM QoS framework has a per-device notification tree.
 
 int dev_pm_qos_add_notifier(device, notifier):
 Adds a notification callback function for the device.
@@ -174,16 +173,6 @@ is changed (for resume latency device PM QoS only).
 int dev_pm_qos_remove_notifier(device, notifier):
 Removes the notification callback function for the device.
 
-int dev_pm_qos_add_global_notifier(notifier):
-Adds a notification callback function in the global notification tree of the
-framework.
-
-The callback is called when the aggregated value for any device is changed
-(for resume latency device PM QoS only).
-
-int dev_pm_qos_remove_global_notifier(notifier):
-Removes the notification callback function from the global notification tree
-of the framework.
-
 Active state latency tolerance
......
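With the global notification tree gone, watchers track constraint changes per device only. A minimal sketch of registering such a watcher, using the dev_pm_qos_add_notifier()/dev_pm_qos_remove_notifier() signatures visible in this series; the callback and notifier-block names are hypothetical:

    #include <linux/notifier.h>
    #include <linux/pm_qos.h>

    /* Called when the aggregated resume-latency constraint for this
     * particular device changes. */
    static int my_dev_qos_cb(struct notifier_block *nb,
                             unsigned long value, void *data)
    {
        pr_info("resume latency constraint is now %lu us\n", value);
        return NOTIFY_OK;
    }

    static struct notifier_block my_dev_qos_nb = {
        .notifier_call = my_dev_qos_cb,
    };

    /* In a driver, with a valid struct device *dev:
     *     dev_pm_qos_add_notifier(dev, &my_dev_qos_nb);
     *     ...
     *     dev_pm_qos_remove_notifier(dev, &my_dev_qos_nb);
     */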
@@ -100,7 +100,7 @@ knows what to do to handle the device).
   * If the suspend callback returns an error code different from -EBUSY and
     -EAGAIN, the PM core regards this as a fatal error and will refuse to run
     the helper functions described in Section 4 for the device until its status
-    is directly set to either'active', or 'suspended' (the PM core provides
+    is directly set to either 'active', or 'suspended' (the PM core provides
     special helper functions for this purpose).
 
 In particular, if the driver requires remote wakeup capability (i.e. hardware
@@ -217,7 +217,7 @@ defined in include/linux/pm.h:
       one to complete
 
   spinlock_t lock;
-    - lock used for synchronisation
+    - lock used for synchronization
 
   atomic_t usage_count;
     - the usage counter of the device
@@ -565,7 +565,7 @@ appropriate to ensure that the device is not put back to sleep during the
 probe. This can happen with systems such as the network device layer.
 
 It may be desirable to suspend the device once ->probe() has finished.
-Therefore the driver core uses the asyncronous pm_request_idle() to submit a
+Therefore the driver core uses the asynchronous pm_request_idle() to submit a
 request to execute the subsystem-level idle callback for the device at that
 time. A driver that makes use of the runtime autosuspend feature, may want to
 update the last busy mark before returning from ->probe().
......
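To illustrate the two points touched by these hunks, transient suspend failures and the last-busy mark, here is a minimal sketch of a hypothetical driver; the foo_* names and helpers are invented for the example:

    #include <linux/errno.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical helpers, stubbed for the sketch. */
    static bool foo_device_busy(struct device *dev) { return false; }
    static void foo_save_state(struct device *dev) { }

    /* Returning -EBUSY (or -EAGAIN) marks the failure as transient; any
     * other error is fatal until the status is reset to 'active' or
     * 'suspended' directly. */
    static int foo_runtime_suspend(struct device *dev)
    {
        if (foo_device_busy(dev))
            return -EBUSY;

        foo_save_state(dev);
        return 0;
    }

    static int foo_probe(struct platform_device *pdev)
    {
        /* ... hardware setup ... */

        /* Autosuspend users: refresh the last-busy mark before
         * returning, so the pm_request_idle() submitted by the driver
         * core does not suspend the device too early. */
        pm_runtime_mark_last_busy(&pdev->dev);
        return 0;
    }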
@@ -273,6 +273,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 	queue_work(pm_wq, &genpd->power_off_work);
 }
 
+/**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the related device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+                           unsigned int depth)
+{
+    struct pm_domain_data *pdd;
+    struct gpd_link *link;
+    unsigned int not_suspended = 0;
+
+    /*
+     * Do not try to power off the domain in the following situations:
+     * (1) The domain is already in the "power off" state.
+     * (2) System suspend is in progress.
+     */
+    if (genpd->status == GPD_STATE_POWER_OFF
+        || genpd->prepared_count > 0)
+        return 0;
+
+    if (atomic_read(&genpd->sd_count) > 0)
+        return -EBUSY;
+
+    list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+        enum pm_qos_flags_status stat;
+
+        stat = dev_pm_qos_flags(pdd->dev,
+                                PM_QOS_FLAG_NO_POWER_OFF
+                                    | PM_QOS_FLAG_REMOTE_WAKEUP);
+        if (stat > PM_QOS_FLAGS_NONE)
+            return -EBUSY;
+
+        /*
+         * Do not allow PM domain to be powered off, when an IRQ safe
+         * device is part of a non-IRQ safe domain.
+         */
+        if (!pm_runtime_suspended(pdd->dev) ||
+            irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+            not_suspended++;
+    }
+
+    if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+        return -EBUSY;
+
+    if (genpd->gov && genpd->gov->power_down_ok) {
+        if (!genpd->gov->power_down_ok(&genpd->domain))
+            return -EAGAIN;
+    }
+
+    if (genpd->power_off) {
+        int ret;
+
+        if (atomic_read(&genpd->sd_count) > 0)
+            return -EBUSY;
+
+        /*
+         * If sd_count > 0 at this point, one of the subdomains hasn't
+         * managed to call genpd_power_on() for the master yet after
+         * incrementing it. In that case genpd_power_on() will wait
+         * for us to drop the lock, so we can call .power_off() and let
+         * the genpd_power_on() restore power for us (this shouldn't
+         * happen very often).
+         */
+        ret = _genpd_power_off(genpd, true);
+        if (ret)
+            return ret;
+    }
+
+    genpd->status = GPD_STATE_POWER_OFF;
+
+    list_for_each_entry(link, &genpd->slave_links, slave_node) {
+        genpd_sd_counter_dec(link->master);
+        genpd_lock_nested(link->master, depth + 1);
+        genpd_power_off(link->master, false, depth + 1);
+        genpd_unlock(link->master);
+    }
+
+    return 0;
+}
+
 /**
  * genpd_power_on - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
                             &genpd->slave_links,
                             slave_node) {
         genpd_sd_counter_dec(link->master);
-        genpd_queue_power_off_work(link->master);
+        genpd_lock_nested(link->master, depth + 1);
+        genpd_power_off(link->master, false, depth + 1);
+        genpd_unlock(link->master);
     }
 
     return ret;
@@ -367,87 +456,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
-/**
- * genpd_power_off - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @is_async: PM domain is powered down from a scheduled work
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
-{
-    struct pm_domain_data *pdd;
-    struct gpd_link *link;
-    unsigned int not_suspended = 0;
-
-    /*
-     * Do not try to power off the domain in the following situations:
-     * (1) The domain is already in the "power off" state.
-     * (2) System suspend is in progress.
-     */
-    if (genpd->status == GPD_STATE_POWER_OFF
-        || genpd->prepared_count > 0)
-        return 0;
-
-    if (atomic_read(&genpd->sd_count) > 0)
-        return -EBUSY;
-
-    list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-        enum pm_qos_flags_status stat;
-
-        stat = dev_pm_qos_flags(pdd->dev,
-                                PM_QOS_FLAG_NO_POWER_OFF
-                                    | PM_QOS_FLAG_REMOTE_WAKEUP);
-        if (stat > PM_QOS_FLAGS_NONE)
-            return -EBUSY;
-
-        /*
-         * Do not allow PM domain to be powered off, when an IRQ safe
-         * device is part of a non-IRQ safe domain.
-         */
-        if (!pm_runtime_suspended(pdd->dev) ||
-            irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
-            not_suspended++;
-    }
-
-    if (not_suspended > 1 || (not_suspended == 1 && is_async))
-        return -EBUSY;
-
-    if (genpd->gov && genpd->gov->power_down_ok) {
-        if (!genpd->gov->power_down_ok(&genpd->domain))
-            return -EAGAIN;
-    }
-
-    if (genpd->power_off) {
-        int ret;
-
-        if (atomic_read(&genpd->sd_count) > 0)
-            return -EBUSY;
-
-        /*
-         * If sd_count > 0 at this point, one of the subdomains hasn't
-         * managed to call genpd_power_on() for the master yet after
-         * incrementing it. In that case genpd_power_on() will wait
-         * for us to drop the lock, so we can call .power_off() and let
-         * the genpd_power_on() restore power for us (this shouldn't
-         * happen very often).
-         */
-        ret = _genpd_power_off(genpd, true);
-        if (ret)
-            return ret;
-    }
-
-    genpd->status = GPD_STATE_POWER_OFF;
-
-    list_for_each_entry(link, &genpd->slave_links, slave_node) {
-        genpd_sd_counter_dec(link->master);
-        genpd_queue_power_off_work(link->master);
-    }
-
-    return 0;
-}
-
 /**
  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
  * @work: Work structure used for scheduling the execution of this function.
@@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, true);
+	genpd_power_off(genpd, false, 0);
 	genpd_unlock(genpd);
 }
@@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, false);
+	genpd_power_off(genpd, true, 0);
 	genpd_unlock(genpd);
 
 	return 0;
@@ -658,7 +666,7 @@ static int genpd_runtime_resume(struct device *dev)
 	if (!pm_runtime_is_irq_safe(dev) ||
 	    (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
 		genpd_lock(genpd);
-		genpd_power_off(genpd, 0);
+		genpd_power_off(genpd, true, 0);
 		genpd_unlock(genpd);
 	}
......
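The new depth argument exists so that each level of the master hierarchy is locked with a distinct lockdep subclass; without it, taking two locks of the same lock class (a domain and then its master) would trigger a false deadlock report. A minimal sketch of the underlying idiom, illustrative only and not the genpd code itself:

    #include <linux/mutex.h>

    struct node {
        struct mutex lock;
        struct node *master;
    };

    /* Walking up the chain, each master is locked one lockdep subclass
     * deeper -- hence the depth + 1 passed along in genpd_power_off(). */
    static void power_off_chain(struct node *n, unsigned int depth)
    {
        if (!n->master)
            return;

        mutex_lock_nested(&n->master->lock, depth + 1);
        power_off_chain(n->master, depth + 1);
        mutex_unlock(&n->master->lock);
    }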
@@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * The caller needs to ensure that opp_table (and hence the regulator)
 	 * isn't freed, while we are executing this routine.
 	 */
-	for (i = 0; reg = regulators[i], i < count; i++) {
+	for (i = 0; i < count; i++) {
+		reg = regulators[i];
 		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 		if (ret > 0)
 			latency_ns += ret * 1000;
......
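The bug in the removed line is subtle: the comma expression loads regulators[i] before the i < count test runs, so the final iteration reads one element past the end of the array. A standalone sketch of the pattern and its fix:

    #include <stdio.h>

    int main(void)
    {
        int vals[3] = { 1, 2, 3 };
        int count = 3, i, v;

        /* Buggy pattern: the comma operator evaluates vals[i] *before*
         * the i < count test, so when i == count this reads vals[3],
         * one past the end of the array:
         *
         *     for (i = 0; v = vals[i], i < count; i++) ...
         */

        /* Fixed pattern: test first, index inside the body. */
        for (i = 0; i < count; i++) {
            v = vals[i];
            printf("%d\n", v);
        }
        return 0;
    }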
@@ -17,12 +17,9 @@
  *
  * This QoS design is best effort based. Dependents register their QoS needs.
  * Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
- *    The notification chain data is stored in the per-device constraint
- *    data struct.
- *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- *    API. The notification chain data is stored in a static variable.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
  *
  * Note about the per-device constraint data struct allocation:
  *  . The per-device constraints data struct ptr is stored into the device
@@ -49,8 +46,6 @@
 static DEFINE_MUTEX(dev_pm_qos_mtx);
 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
 /**
  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
  * @dev: Device to check the PM QoS flags for.
@@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev)
 {
 	lockdep_assert_held(&dev->power.lock);
 
-	return IS_ERR_OR_NULL(dev->power.qos) ?
-		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+	return dev_pm_qos_raw_read_value(dev);
 }
 
 /**
@@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
  * @value: Value to assign to the QoS request.
  *
  * Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
+ * code and if needed call the per-device callbacks.
  */
 static int apply_constraint(struct dev_pm_qos_request *req,
                             enum pm_qos_req_action action, s32 value)
@@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = pm_qos_update_target(&qos->resume_latency,
 		                           &req->data.pnode, action, value);
-		if (ret) {
-			value = pm_qos_read_value(&qos->resume_latency);
-			blocking_notifier_call_chain(&dev_pm_notifiers,
-			                             (unsigned long)value,
-			                             req);
-		}
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
@@ -535,36 +522,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
 
-/**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
 /**
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
......
@@ -364,37 +364,25 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits performance_limits = {
-	.no_turbo = 0,
-	.turbo_disabled = 0,
-	.max_perf_pct = 100,
-	.max_perf = int_ext_tofp(1),
-	.min_perf_pct = 100,
-	.min_perf = int_ext_tofp(1),
-	.max_policy_pct = 100,
-	.max_sysfs_pct = 100,
-	.min_policy_pct = 0,
-	.min_sysfs_pct = 0,
-};
+static struct perf_limits performance_limits;
+static struct perf_limits powersave_limits;
+static struct perf_limits *limits;
 
-static struct perf_limits powersave_limits = {
-	.no_turbo = 0,
-	.turbo_disabled = 0,
-	.max_perf_pct = 100,
-	.max_perf = int_ext_tofp(1),
-	.min_perf_pct = 0,
-	.min_perf = 0,
-	.max_policy_pct = 100,
-	.max_sysfs_pct = 100,
-	.min_policy_pct = 0,
-	.min_sysfs_pct = 0,
-};
+static void intel_pstate_init_limits(struct perf_limits *limits)
+{
+	memset(limits, 0, sizeof(*limits));
+	limits->max_perf_pct = 100;
+	limits->max_perf = int_ext_tofp(1);
+	limits->max_policy_pct = 100;
+	limits->max_sysfs_pct = 100;
+}
 
-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
-static struct perf_limits *limits = &performance_limits;
-#else
-static struct perf_limits *limits = &powersave_limits;
-#endif
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+	intel_pstate_init_limits(limits);
+	limits->min_perf_pct = 100;
+	limits->min_perf = int_ext_tofp(1);
+}
 
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
@@ -2084,20 +2072,6 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 	synchronize_sched();
 }
 
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-	limits->no_turbo = 0;
-	limits->turbo_disabled = 0;
-	limits->max_perf_pct = 100;
-	limits->max_perf = int_ext_tofp(1);
-	limits->min_perf_pct = 100;
-	limits->min_perf = int_ext_tofp(1);
-	limits->max_policy_pct = 100;
-	limits->max_sysfs_pct = 100;
-	limits->min_policy_pct = 0;
-	limits->min_sysfs_pct = 0;
-}
-
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
                                             struct perf_limits *limits)
 {
@@ -2466,6 +2440,11 @@ static int intel_pstate_register_driver(void)
 {
 	int ret;
 
+	intel_pstate_init_limits(&powersave_limits);
+	intel_pstate_set_performance_limits(&performance_limits);
+	limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
+			&performance_limits : &powersave_limits;
+
 	ret = cpufreq_register_driver(intel_pstate_driver);
 	if (ret) {
 		intel_pstate_driver_cleanup();
......
@@ -23,10 +23,6 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 
-#if !defined(CONFIG_ARM)
-#include <asm/smp.h>	/* for get_hard_smp_processor_id() in UP configs */
-#endif
-
 /**
  * struct cpu_data
  * @pclk: the parent clock of cpu
......
@@ -287,7 +287,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	unsigned int interactivity_req;
 	unsigned int expected_interval;
 	unsigned long nr_iowaiters, cpu_load;
-	int resume_latency = dev_pm_qos_read_value(device);
+	int resume_latency = dev_pm_qos_raw_read_value(device);
 
 	if (data->needs_update) {
 		menu_update(drv, dev);
......
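dev_pm_qos_read_value() takes dev->power.lock with interrupts disabled, which is what the RT patch tripped over in the idle loop; dev_pm_qos_raw_read_value(), added in include/linux/pm_qos.h below, performs the same read without taking the lock. For contrast, a sketch of the locked accessor being avoided here, modeled on the 4.11-era drivers/base/power/qos.c and meant as illustration only:

    #include <linux/device.h>
    #include <linux/pm_qos.h>

    s32 dev_pm_qos_read_value(struct device *dev)
    {
        unsigned long flags;
        s32 ret;

        /* The spinlock acquisition the menu governor now avoids: */
        spin_lock_irqsave(&dev->power.lock, flags);
        ret = __dev_pm_qos_read_value(dev); /* requires the lock held */
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return ret;
    }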
(This diff has been collapsed.)
@@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev,
                             struct notifier_block *notifier);
 int dev_pm_qos_remove_notifier(struct device *dev,
                                struct notifier_block *notifier);
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
 int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -172,6 +170,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
 {
 	return dev->power.qos->flags_req->data.flr.flags;
 }
+
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+{
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+}
 #else
 static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
                                                           s32 mask)
@@ -199,12 +203,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev,
 static inline int dev_pm_qos_remove_notifier(struct device *dev,
                                              struct notifier_block *notifier)
 			{ return 0; }
-static inline int dev_pm_qos_add_global_notifier(
-					struct notifier_block *notifier)
-			{ return 0; }
-static inline int dev_pm_qos_remove_global_notifier(
-					struct notifier_block *notifier)
-			{ return 0; }
 static inline void dev_pm_qos_constraints_init(struct device *dev)
 {
 	dev->power.power_state = PMSG_ON;
@@ -236,6 +234,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
 
 #endif
 
 #endif
@@ -10,6 +10,8 @@
  * This file is released under the GPLv2.
  */
 
+#define pr_fmt(fmt) "PM: " fmt
+
 #include <linux/export.h>
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
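Defining pr_fmt() before the includes makes every pr_*() invocation in the file prepend the "PM: " prefix at compile time, which is why the literal "PM: " strings can be dropped from the messages in the hunks below. A minimal sketch of the mechanism:

    /* Must come before linux/printk.h is pulled in, or the default
     * (empty) pr_fmt() wins. */
    #define pr_fmt(fmt) "PM: " fmt

    #include <linux/printk.h>

    static void example(void)
    {
        /* Expands to printk(KERN_INFO "PM: " "Syncing filesystems ...\n") */
        pr_info("Syncing filesystems ...\n");
    }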
@@ -104,7 +106,7 @@ EXPORT_SYMBOL(system_entering_hibernation);
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
-	printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n");
+	pr_info("hibernation debug: Waiting for 5 seconds.\n");
 	mdelay(5000);
 }
@@ -250,10 +252,9 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
 		centisecs = 1;	/* avoid div-by-zero */
 	k = nr_pages * (PAGE_SIZE / 1024);
 	kps = (k * 100) / centisecs;
-	printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
-			msg, k,
-			centisecs / 100, centisecs % 100,
-			kps / 1000, (kps % 1000) / 10);
+	pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
+		msg, k, centisecs / 100, centisecs % 100, kps / 1000,
+		(kps % 1000) / 10);
 }
 
 /**
@@ -271,8 +272,7 @@ static int create_image(int platform_mode)
 
 	error = dpm_suspend_end(PMSG_FREEZE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
-			"aborting hibernation\n");
+		pr_err("Some devices failed to power down, aborting hibernation\n");
 		return error;
 	}
error = syscore_suspend();
if (error) {
printk(KERN_ERR "PM: Some system devices failed to power down, "
"aborting hibernation\n");
pr_err("Some system devices failed to power down, aborting hibernation\n");
goto Enable_irqs;
}
@@ -304,8 +303,8 @@ static int create_image(int platform_mode)
 	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
-		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
-			error);
+		pr_err("Error %d creating hibernation image\n", error);
+
 	if (!in_suspend) {
 		events_check_enabled = false;
 		clear_free_pages();
@@ -432,8 +431,7 @@ static int resume_target_kernel(bool platform_mode)
 
 	error = dpm_suspend_end(PMSG_QUIESCE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
-			"aborting resume\n");
+		pr_err("Some devices failed to power down, aborting resume\n");
 		return error;
 	}
@@ -608,6 +606,22 @@ static void power_down(void)
 {
 #ifdef CONFIG_SUSPEND
 	int error;
 
+	if (hibernation_mode == HIBERNATION_SUSPEND) {
+		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+		if (error) {
+			hibernation_mode = hibernation_ops ?
+						HIBERNATION_PLATFORM :
+						HIBERNATION_SHUTDOWN;
+		} else {
+			/* Restore swap signature. */
+			error = swsusp_unmark();
+			if (error)
+				pr_err("Swap will be unusable! Try swapon -a.\n");
+
+			return;
+		}
+	}
+#endif
+
 	switch (hibernation_mode) {
@@ -620,32 +634,13 @@ static void power_down(void)
 		if (pm_power_off)
 			kernel_power_off();
 		break;
-#ifdef CONFIG_SUSPEND
-	case HIBERNATION_SUSPEND:
-		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-		if (error) {
-			if (hibernation_ops)
-				hibernation_mode = HIBERNATION_PLATFORM;
-			else
-				hibernation_mode = HIBERNATION_SHUTDOWN;
-			power_down();
-		}
-		/*
-		 * Restore swap signature.
-		 */
-		error = swsusp_unmark();
-		if (error)
-			printk(KERN_ERR "PM: Swap will be unusable! "
-					"Try swapon -a.\n");
-		return;
-#endif
 	}
 	kernel_halt();
 	/*
 	 * Valid image is on the disk, if we continue we risk serious data
 	 * corruption after resume.
 	 */
-	printk(KERN_CRIT "PM: Please power down manually\n");
+	pr_crit("Power down manually\n");
 	while (1)
 		cpu_relax();
 }
@@ -655,7 +650,7 @@ static int load_image_and_restore(void)
 	int error;
 	unsigned int flags;
 
-	pr_debug("PM: Loading hibernation image.\n");
+	pr_debug("Loading hibernation image.\n");
 
 	lock_device_hotplug();
 	error = create_basic_memory_bitmaps();
@@ -667,7 +662,7 @@ static int load_image_and_restore(void)
 	if (!error)
 		hibernation_restore(flags & SF_PLATFORM_MODE);
 
-	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
+	pr_err("Failed to load hibernation image, recovering.\n");
 	swsusp_free();
 	free_basic_memory_bitmaps();
  Unlock:
@@ -685,7 +680,7 @@ int hibernate(void)
 	bool snapshot_test = false;
 
 	if (!hibernation_available()) {
-		pr_debug("PM: Hibernation not available.\n");
+		pr_debug("Hibernation not available.\n");
 		return -EPERM;
 	}
@@ -703,9 +698,9 @@ int hibernate(void)
 		goto Exit;
 	}
 
-	printk(KERN_INFO "PM: Syncing filesystems ... ");
+	pr_info("Syncing filesystems ... \n");
 	sys_sync();
-	printk("done.\n");
+	pr_info("done.\n");
 
 	error = freeze_processes();
 	if (error)
@@ -731,7 +726,7 @@ int hibernate(void)
 		else
 			flags |= SF_CRC32_MODE;
 
-		pr_debug("PM: writing image.\n");
+		pr_debug("Writing image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
 		if (!error) {
@@ -743,7 +738,7 @@ int hibernate(void)
 		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
-		pr_debug("PM: Image restored successfully.\n");
+		pr_debug("Image restored successfully.\n");
 	}
 
  Free_bitmaps:
@@ -751,7 +746,7 @@ int hibernate(void)
  Thaw:
 	unlock_device_hotplug();
 	if (snapshot_test) {
-		pr_debug("PM: Checking hibernation image\n");
+		pr_debug("Checking hibernation image\n");
 		error = swsusp_check();
 		if (!error)
 			error = load_image_and_restore();
@@ -815,10 +810,10 @@ static int software_resume(void)
 		goto Unlock;
 	}
 
-	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
+	pr_debug("Checking hibernation image partition %s\n", resume_file);
 
 	if (resume_delay) {
-		printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
+		pr_info("Waiting %dsec before reading resume device ...\n",
 			resume_delay);
 		ssleep(resume_delay);
 	}
@@ -857,10 +852,10 @@ static int software_resume(void)
 	}
 
  Check_image:
-	pr_debug("PM: Hibernation image partition %d:%d present\n",
+	pr_debug("Hibernation image partition %d:%d present\n",
 		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
-	pr_debug("PM: Looking for hibernation image.\n");
+	pr_debug("Looking for hibernation image.\n");
 	error = swsusp_check();
 	if (error)
 		goto Unlock;
@@ -879,7 +874,7 @@ static int software_resume(void)
 		goto Close_Finish;
 	}
 
-	pr_debug("PM: Preparing processes for restore.\n");
+	pr_debug("Preparing processes for restore.\n");
 	error = freeze_processes();
 	if (error)
 		goto Close_Finish;
@@ -892,7 +887,7 @@ static int software_resume(void)
 	/* For success case, the suspend path will release the lock */
  Unlock:
 	mutex_unlock(&pm_mutex);
-	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
+	pr_debug("Hibernation image not present or could not be loaded.\n");
 	return error;
 Close_Finish:
 	swsusp_close(FMODE_READ);
@@ -1016,7 +1011,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 		error = -EINVAL;
 
 	if (!error)
-		pr_debug("PM: Hibernation mode set to '%s'\n",
+		pr_debug("Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
 	unlock_system_sleep();
 	return error ? error : n;
@@ -1052,7 +1047,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	lock_system_sleep();
 	swsusp_resume_device = res;
 	unlock_system_sleep();
-	printk(KERN_INFO "PM: Starting manual resume from disk\n");
+	pr_info("Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
 	return n;
......