Commit ab232ba5 authored by Rafael J. Wysocki

Merge branches 'pm-sleep' and 'pm-runtime'

* pm-sleep:
  PM / sleep: trace_device_pm_callback coverage in dpm_prepare/complete
  PM / wakeup: add a dummy wakeup_source to record statistics
  PM / sleep: Make suspend-to-idle-specific code depend on CONFIG_SUSPEND
  PM / sleep: Return -EBUSY from suspend_enter() on wakeup detection
  PM / tick: Add tracepoints for suspend-to-idle diagnostics
  PM / sleep: Fix symbol name in a comment in kernel/power/main.c
  leds / PM: fix hibernation on arm when gpio-led used with CPU led trigger
  ARM: omap-device: use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS
  bus: omap_l3_noc: add missed callbacks for suspend-to-disk
  PM / sleep: Add macro to define common noirq system PM callbacks
  PM / sleep: Refine diagnostic messages in enter_state()
  PM / wakeup: validate wakeup source before activating it.

* pm-runtime:
  PM / Runtime: Update last_busy in rpm_resume
  PM / runtime: add note about re-calling in during device probe()
......@@ -556,6 +556,12 @@ helper functions described in Section 4. In that case, pm_runtime_resume()
should be used. Of course, for this purpose the device's runtime PM has to be
enabled earlier by calling pm_runtime_enable().
Note, if the device may execute pm_runtime calls during the probe (such as
if it registers with a subsystem that may call back in) then the
pm_runtime_get_sync() call paired with a pm_runtime_put() call will be
appropriate to ensure that the device is not put back to sleep during the
probe. This can happen with systems such as the network device layer.
It may be desirable to suspend the device once ->probe() has finished.
Therefore the driver core uses the asynchronous pm_request_idle() to submit a
request to execute the subsystem-level idle callback for the device at that
......
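
The documentation paragraph above describes pairing pm_runtime_get_sync() with pm_runtime_put() across the part of ->probe() in which a subsystem may call back into the device. A minimal sketch of that pattern follows; foo_probe() and foo_register_with_subsystem() are invented names used only for illustration (the latter stands in for a real registration call such as register_netdev()), and only the pm_runtime_* calls are part of the documented API:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/*
 * Placeholder for a real subsystem registration call (e.g. register_netdev());
 * not a real kernel function.
 */
static int foo_register_with_subsystem(struct platform_device *pdev)
{
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Enable runtime PM before the subsystem can call back in. */
	pm_runtime_enable(&pdev->dev);

	/*
	 * Keep the device active for the remainder of probe; the subsystem
	 * we register with below may invoke runtime PM operations on it.
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err;

	ret = foo_register_with_subsystem(pdev);
	if (ret)
		goto err;

	/* Balance the get; the device may runtime suspend again later. */
	pm_runtime_put(&pdev->dev);
	return 0;

err:
	/* pm_runtime_get_sync() bumps the usage count even on failure. */
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
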
......@@ -688,11 +688,8 @@ struct dev_pm_domain omap_device_pm_domain = {
SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
NULL)
USE_PLATFORM_PM_SLEEP_OPS
.suspend_noirq = _od_suspend_noirq,
.resume_noirq = _od_resume_noirq,
.freeze_noirq = _od_suspend_noirq,
.thaw_noirq = _od_resume_noirq,
.restore_noirq = _od_resume_noirq,
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq,
_od_resume_noirq)
}
};
......
......@@ -920,9 +920,7 @@ static void device_complete(struct device *dev, pm_message_t state)
if (callback) {
pm_dev_dbg(dev, state, info);
trace_device_pm_callback_start(dev, info, state.event);
callback(dev);
trace_device_pm_callback_end(dev, 0);
}
device_unlock(dev);
......@@ -954,7 +952,9 @@ void dpm_complete(pm_message_t state)
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
trace_device_pm_callback_end(dev, 0);
mutex_lock(&dpm_list_mtx);
put_device(dev);
......@@ -1585,11 +1585,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
callback = dev->driver->pm->prepare;
}
if (callback) {
trace_device_pm_callback_start(dev, info, state.event);
if (callback)
ret = callback(dev);
trace_device_pm_callback_end(dev, ret);
}
device_unlock(dev);
......@@ -1631,7 +1628,9 @@ int dpm_prepare(pm_message_t state)
get_device(dev);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
error = device_prepare(dev, state);
trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
if (error) {
......
......@@ -741,6 +741,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
pm_runtime_mark_last_busy(dev);
if (parent)
atomic_inc(&parent->power.child_count);
}
......
......@@ -56,6 +56,11 @@ static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};
/**
* wakeup_source_prepare - Prepare a new wakeup source for initialization.
* @ws: Wakeup source to prepare.
......@@ -107,6 +112,34 @@ void wakeup_source_drop(struct wakeup_source *ws)
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);
/*
 * Record the statistics of a wakeup_source being deleted into the dummy
 * wakeup_source.
*/
static void wakeup_source_record(struct wakeup_source *ws)
{
unsigned long flags;
spin_lock_irqsave(&deleted_ws.lock, flags);
if (ws->event_count) {
deleted_ws.total_time =
ktime_add(deleted_ws.total_time, ws->total_time);
deleted_ws.prevent_sleep_time =
ktime_add(deleted_ws.prevent_sleep_time,
ws->prevent_sleep_time);
deleted_ws.max_time =
ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
deleted_ws.max_time : ws->max_time;
deleted_ws.event_count += ws->event_count;
deleted_ws.active_count += ws->active_count;
deleted_ws.relax_count += ws->relax_count;
deleted_ws.expire_count += ws->expire_count;
deleted_ws.wakeup_count += ws->wakeup_count;
}
spin_unlock_irqrestore(&deleted_ws.lock, flags);
}
/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
......@@ -119,6 +152,7 @@ void wakeup_source_destroy(struct wakeup_source *ws)
return;
wakeup_source_drop(ws);
wakeup_source_record(ws);
kfree(ws->name);
kfree(ws);
}
......@@ -351,6 +385,20 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/**
* wakeup_source_not_registered - validate the given wakeup source.
* @ws: Wakeup source to be validated.
*/
static bool wakeup_source_not_registered(struct wakeup_source *ws)
{
/*
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
return ws->timer.function != pm_wakeup_timer_fn ||
ws->timer.data != (unsigned long)ws;
}
/*
* The functions below use the observation that each wakeup event starts a
* period in which the system should not be suspended. The moment this period
......@@ -391,6 +439,10 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
if (WARN_ONCE(wakeup_source_not_registered(ws),
"unregistered wakeup source\n"))
return;
/*
* active wakeup source should bring the system
* out of PM_SUSPEND_FREEZE state
......@@ -894,6 +946,8 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
print_wakeup_source_stats(m, ws);
rcu_read_unlock();
print_wakeup_source_stats(m, &deleted_ws);
return 0;
}
......
......@@ -301,7 +301,7 @@ static int omap_l3_probe(struct platform_device *pdev)
return ret;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
/**
* l3_resume_noirq() - resume function for l3_noc
......@@ -347,7 +347,7 @@ static int l3_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops l3_dev_pm_ops = {
.resume_noirq = l3_resume_noirq,
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};
#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
......
......@@ -97,6 +97,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
#ifdef CONFIG_SUSPEND
/**
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
......@@ -150,6 +151,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
return index;
}
#endif /* CONFIG_SUSPEND */
/**
* cpuidle_enter_state - enter the state and update stats
......
......@@ -187,6 +187,7 @@ void led_classdev_resume(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_resume);
#ifdef CONFIG_PM_SLEEP
static int led_suspend(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
......@@ -206,11 +207,9 @@ static int led_resume(struct device *dev)
return 0;
}
#endif
static const struct dev_pm_ops leds_class_dev_pm_ops = {
.suspend = led_suspend,
.resume = led_resume,
};
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
static int match_name(struct device *dev, const void *data)
{
......
......@@ -151,10 +151,6 @@ extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
......@@ -190,14 +186,22 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
#endif
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
#endif
/* kernel/sched/idle.c */
......
......@@ -342,6 +342,18 @@ struct dev_pm_ops {
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend_noirq = suspend_fn, \
.resume_noirq = resume_fn, \
.freeze_noirq = suspend_fn, \
.thaw_noirq = resume_fn, \
.poweroff_noirq = suspend_fn, \
.restore_noirq = resume_fn,
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
......
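
For reference, a usage sketch of the new SET_NOIRQ_SYSTEM_SLEEP_PM_OPS macro in a driver's dev_pm_ops; bar_suspend_noirq/bar_resume_noirq are invented names for this sketch (the real conversions are the omap_device and omap_l3_noc hunks above):

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical noirq callbacks; the names are invented for this sketch. */
static int __maybe_unused bar_suspend_noirq(struct device *dev)
{
	/* Quiesce the device after device interrupts have been disabled. */
	return 0;
}

static int __maybe_unused bar_resume_noirq(struct device *dev)
{
	/* Bring the device back before device interrupts are re-enabled. */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
	/*
	 * With CONFIG_PM_SLEEP set, this expands to all six noirq pointers
	 * (suspend/freeze/poweroff -> bar_suspend_noirq,
	 *  resume/thaw/restore -> bar_resume_noirq); otherwise to nothing.
	 */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(bar_suspend_noirq, bar_resume_noirq)
};
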
......@@ -13,8 +13,6 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
extern void tick_freeze(void);
extern void tick_unfreeze(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
......@@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif
#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
......
......@@ -272,7 +272,7 @@ static inline void pm_print_times_init(void)
{
pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PP_SLEEP_DEBUG */
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
......
......@@ -366,6 +366,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
trace_suspend_resume(TPS("machine_suspend"),
state, false);
events_check_enabled = false;
} else if (*wakeup) {
error = -EBUSY;
}
syscore_resume();
}
......@@ -468,7 +470,7 @@ static int enter_state(suspend_state_t state)
if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
pr_warning("PM: Unsupported test mode for freeze state,"
pr_warning("PM: Unsupported test mode for suspend to idle,"
"please choose none/freezer/devices/platform.\n");
return -EAGAIN;
}
......@@ -488,7 +490,7 @@ static int enter_state(suspend_state_t state)
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
error = suspend_prepare(state);
if (error)
goto Unlock;
......@@ -497,7 +499,7 @@ static int enter_state(suspend_state_t state)
goto Finish;
trace_suspend_resume(TPS("suspend_enter"), state, false);
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
pr_debug("PM: Suspending system (%s)\n", pm_states[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
......
......@@ -19,6 +19,7 @@
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>
#include <asm/irq_regs.h>
......@@ -440,6 +441,7 @@ void tick_resume(void)
tick_resume_local();
}
#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
......@@ -457,10 +459,13 @@ void tick_freeze(void)
raw_spin_lock(&tick_freeze_lock);
tick_freeze_depth++;
if (tick_freeze_depth == num_online_cpus())
if (tick_freeze_depth == num_online_cpus()) {
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
timekeeping_suspend();
else
} else {
tick_suspend_local();
}
raw_spin_unlock(&tick_freeze_lock);
}
......@@ -478,15 +483,19 @@ void tick_unfreeze(void)
{
raw_spin_lock(&tick_freeze_lock);
if (tick_freeze_depth == num_online_cpus())
if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
else
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
} else {
tick_resume_local();
}
tick_freeze_depth--;
raw_spin_unlock(&tick_freeze_lock);
}
#endif /* CONFIG_SUSPEND */
/**
* tick_init - initialize the tick control
......