Commit dad3de7d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  PM: Add flag for devices capable of generating run-time wake-up events
  PM / Runtime: Remove unnecessary braces in __pm_runtime_set_status()
  PM / Runtime: Make documentation of runtime_idle() agree with the code
  PM / Runtime: Ensure timer_expires is nonzero in pm_schedule_suspend()
  PM / Runtime: Use deferred_resume flag in pm_request_resume
  PM / Runtime: Export the PM runtime workqueue
  PM / Runtime: Fix lockdep warning in __pm_runtime_set_status()
  PM / Hibernate: Swap, use KERN_CONT
  PM / Hibernate: Shift remaining code from swsusp.c to hibernate.c
  PM / Hibernate: Move swap functions to kernel/power/swap.c.
  PM / freezer: Don't get over-anxious while waiting
@@ -38,7 +38,7 @@ struct dev_pm_ops {
...
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
-void (*runtime_idle)(struct device *dev);
+int (*runtime_idle)(struct device *dev);
...
};
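
The hunk above changes ->runtime_idle() from void to int. As a minimal sketch (not from this commit; the foo_* names and callback bodies are invented for illustration), a driver supplying the three run-time callbacks with the new signatures might look like this:

static int foo_runtime_suspend(struct device *dev)
{
    /* quiesce the hardware; 0 means the device is now suspended */
    return 0;
}

static int foo_runtime_resume(struct device *dev)
{
    /* bring the hardware back to the fully active state */
    return 0;
}

static int foo_runtime_idle(struct device *dev)
{
    /* now returns int, although the PM core ignores the value */
    return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
    .runtime_suspend = foo_runtime_suspend,
    .runtime_resume = foo_runtime_resume,
    .runtime_idle = foo_runtime_idle,
};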
@@ -71,9 +71,9 @@ what to do to handle the device).
purpose).
In particular, if the driver requires remote wakeup capability for proper
-functioning and device_may_wakeup() returns 'false' for the device, then
+functioning and device_run_wake() returns 'false' for the device, then
->runtime_suspend() should return -EBUSY. On the other hand, if
-device_may_wakeup() returns 'true' for the device and the device is put
+device_run_wake() returns 'true' for the device and the device is put
into a low power state during the execution of its bus type's
->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism
allowing the device to request a change of its power state, such as PCI PME)
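
A sketch of a ->runtime_suspend() that follows the rule above, using the device_run_wake() helper introduced by this series; the bar_* helpers are hypothetical stand-ins for driver-specific code:

static int bar_runtime_suspend(struct device *dev)
{
    /* remote wakeup required but not available: refuse to suspend */
    if (bar_needs_remote_wakeup(dev) && !device_run_wake(dev))
        return -EBUSY;

    bar_arm_wakeup_signal(dev);  /* hypothetical: enable PME-like signaling */
    bar_enter_low_power(dev);    /* hypothetical: drop to a low power state */
    return 0;
}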
@@ -114,7 +114,8 @@ The action performed by a bus type's ->runtime_idle() callback is totally
dependent on the bus type in question, but the expected and recommended action
is to check if the device can be suspended (i.e. if all of the conditions
necessary for suspending the device are satisfied) and to queue up a suspend
-request for the device in that case.
+request for the device in that case. The value returned by this callback is
+ignored by the PM core.
The helper functions provided by the PM core, described in Section 4, guarantee
that the following constraints are met with respect to the bus type's run-time
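
A sketch of the recommended ->runtime_idle() behavior described above: check whether the device may be suspended and, if so, queue a suspend request. pm_schedule_suspend() appears later in this commit; the readiness check is a hypothetical placeholder:

static int foo_bus_runtime_idle(struct device *dev)
{
    if (foo_bus_may_suspend(dev))       /* hypothetical condition check */
        pm_schedule_suspend(dev, 0);    /* queue a suspend request with no extra delay */
    return 0;                           /* ignored by the PM core */
}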
@@ -214,6 +215,9 @@ defined in include/linux/pm.h:
being executed for that device and it is not practical to wait for the
suspend to complete; means "start a resume as soon as you've suspended"
+unsigned int run_wake;
+  - set if the device is capable of generating run-time wake-up events
enum rpm_status runtime_status;
- the run-time PM status of the device; this field's initial value is
RPM_SUSPENDED, which means that each device is initially regarded by the
......
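
A sketch of how the new run_wake flag might get set: a driver's probe path would record the hardware's wake-up capability once it is known. The foo_* names are invented; device_set_run_wake() is one of the static inline helpers added later in this commit:

static int foo_probe(struct device *dev)
{
    bool can_wake = foo_hw_supports_wakeup(dev);  /* hypothetical detection */

    device_set_run_wake(dev, can_wake);
    return 0;
}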
@@ -185,6 +185,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
}
dev->power.runtime_status = RPM_SUSPENDING;
+dev->power.deferred_resume = false;
if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
spin_unlock_irq(&dev->power.lock);
@@ -200,7 +201,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
if (retval) {
dev->power.runtime_status = RPM_ACTIVE;
pm_runtime_cancel_pending(dev);
-dev->power.deferred_resume = false;
if (retval == -EAGAIN || retval == -EBUSY) {
notify = true;
@@ -217,7 +217,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
-dev->power.deferred_resume = false;
__pm_runtime_resume(dev, false);
retval = -EAGAIN;
goto out;
@@ -626,6 +625,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
goto out;
dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
+if (!dev->power.timer_expires)
+dev->power.timer_expires = 1;
mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
out:
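
Why the two added lines matter: in this file a timer_expires value of zero means "no suspend request scheduled", and since jiffies wraps around, jiffies + msecs_to_jiffies(delay) can legitimately evaluate to zero once the addition wraps (for example, 0xffffffff + 1 on a 32-bit counter). Forcing the value to 1 makes the timer fire one jiffy early in that rare case, which is harmless, instead of making a scheduled suspend look unscheduled.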
@@ -659,13 +660,17 @@ static int __pm_request_resume(struct device *dev)
pm_runtime_deactivate_timer(dev);
+if (dev->power.runtime_status == RPM_SUSPENDING) {
+dev->power.deferred_resume = true;
+return retval;
+}
if (dev->power.request_pending) {
/* If non-resume request is pending, we can overtake it. */
dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
return retval;
-} else if (retval) {
-return retval;
}
+if (retval)
+return retval;
dev->power.request = RPM_REQ_RESUME;
dev->power.request_pending = true;
@@ -777,7 +782,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
}
if (parent) {
-spin_lock(&parent->power.lock);
+spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
* It is invalid to put an active child under a parent that is
@@ -786,12 +791,10 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children
-&& parent->power.runtime_status != RPM_ACTIVE) {
+&& parent->power.runtime_status != RPM_ACTIVE)
error = -EBUSY;
-} else {
-if (dev->power.runtime_status == RPM_SUSPENDED)
-atomic_inc(&parent->power.child_count);
-}
+else if (dev->power.runtime_status == RPM_SUSPENDED)
+atomic_inc(&parent->power.child_count);
spin_unlock(&parent->power.lock);
......
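
Background on the spin_lock_nested() change, with a generic sketch of the pattern: the device's power.lock is already held when the parent's power.lock is taken, and both locks belong to the same lock class, so a plain spin_lock() makes lockdep report a possible recursive deadlock. SINGLE_DEPTH_NESTING tells lockdep that exactly one level of parent/child nesting is intended. The struct and function below are illustrative, not the commit's code:

#include <linux/spinlock.h>

struct foo_node {
    spinlock_t lock;
    struct foo_node *parent;
};

static void foo_update_with_parent(struct foo_node *node)
{
    spin_lock_irq(&node->lock);
    if (node->parent)
        /* same lock class as node->lock: annotate the nesting */
        spin_lock_nested(&node->parent->lock, SINGLE_DEPTH_NESTING);

    /* ... update node and parent state ... */

    if (node->parent)
        spin_unlock(&node->parent->lock);
    spin_unlock_irq(&node->lock);
}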
@@ -178,9 +178,10 @@ typedef struct pm_message {
* This need not mean that the device should be put into a low power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
-* power and if device_may_wakeup(dev) is true, remote wake-up (i.e., a
-* hardware mechanism allowing the device to request a change of its power
-* state, such as PCI PME) should be enabled for it.
+* power and is capable of generating run-time wake-up events, remote
+* wake-up (i.e., a hardware mechanism allowing the device to request a
+* change of its power state via a wake-up event, such as PCI PME) should
+* be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
* wake-up event generated by hardware or at the request of software. If
@@ -428,6 +429,7 @@ struct dev_pm_info {
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
+unsigned int run_wake:1;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
......
@@ -50,6 +50,16 @@ static inline void pm_runtime_put_noidle(struct device *dev)
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
+static inline bool device_run_wake(struct device *dev)
+{
+return dev->power.run_wake;
+}
+static inline void device_set_run_wake(struct device *dev, bool enable)
+{
+dev->power.run_wake = enable;
+}
#else /* !CONFIG_PM_RUNTIME */
static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; }
@@ -73,6 +83,8 @@ static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_suspend_ignore_children(struct device *dev, bool en) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
+static inline bool device_run_wake(struct device *dev) { return false; }
+static inline void device_set_run_wake(struct device *dev, bool enable) {}
#endif /* !CONFIG_PM_RUNTIME */
......
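
One detail worth noting, with a small sketch: because the !CONFIG_PM_RUNTIME stubs above are static inlines returning constants, callers can use device_run_wake() without any #ifdef and the compiler discards the dead branch. The foo_* names are hypothetical:

static void foo_configure_wakeup(struct device *dev)
{
    if (device_run_wake(dev))   /* constant false without CONFIG_PM_RUNTIME */
        foo_arm_wake_hw(dev);   /* hypothetical hardware setup */
}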
@@ -8,7 +8,7 @@ obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
-obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o
obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
@@ -32,6 +32,7 @@ static int noresume = 0;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
+int in_suspend __nosavedata = 0;
enum {
HIBERNATION_INVALID,
@@ -201,6 +202,35 @@ static void platform_recover(int platform_mode)
hibernation_ops->recover();
}
/**
* swsusp_show_speed - print the time elapsed between two events.
* @start: Starting event.
* @stop: Final event.
* @nr_pages - number of pages processed between @start and @stop
* @msg - introductory message to print
*/
void swsusp_show_speed(struct timeval *start, struct timeval *stop,
unsigned nr_pages, char *msg)
{
s64 elapsed_centisecs64;
int centisecs;
int k;
int kps;
elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
centisecs = elapsed_centisecs64;
if (centisecs == 0)
centisecs = 1; /* avoid div-by-zero */
k = nr_pages * (PAGE_SIZE / 1024);
kps = (k * 100) / centisecs;
printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
msg, k,
centisecs / 100, centisecs % 100,
kps / 1000, (kps % 1000) / 10);
}
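
To make the arithmetic concrete (numbers invented for illustration): writing 25600 pages of 4 KiB in exactly 2 s gives k = 25600 * 4 = 102400 kbytes and centisecs = 200, so kps = 102400 * 100 / 200 = 51200, and the function prints "PM: Wrote 102400 kbytes in 2.00 seconds (51.20 MB/s)".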
/**
* create_image - freeze devices that need to be frozen with interrupts
* off, create the hibernation image and thaw those devices. Control
......
@@ -220,6 +220,7 @@ static struct attribute_group attr_group = {
#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
+EXPORT_SYMBOL_GPL(pm_wq);
static int __init pm_start_workqueue(void)
{
......
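
The point of the export, with a sketch: pm_wq was previously usable only inside the PM core, and EXPORT_SYMBOL_GPL() lets modular code queue work on the shared PM workqueue instead of creating its own. The foo_* work item below is hypothetical:

#include <linux/workqueue.h>
#include <linux/pm_runtime.h>

static void foo_pm_work_fn(struct work_struct *work)
{
    /* hypothetical deferred power-management action */
}
static DECLARE_WORK(foo_pm_work, foo_pm_work_fn);

static void foo_kick_pm(void)
{
    queue_work(pm_wq, &foo_pm_work);
}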
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
+#include <linux/delay.h>
/*
* Timeout for stopping processes
@@ -41,7 +42,7 @@ static int try_to_freeze_tasks(bool sig_only)
do_gettimeofday(&start);
end_time = jiffies + TIMEOUT;
-do {
+while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
@@ -62,10 +63,15 @@ static int try_to_freeze_tasks(bool sig_only)
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
-yield(); /* Yield is okay here */
-if (time_after(jiffies, end_time))
+if (!todo || time_after(jiffies, end_time))
break;
-} while (todo);
+/*
+ * We need to retry, but first give the freezing tasks some
+ * time to enter the refrigerator.
+ */
+msleep(10);
+}
do_gettimeofday(&end);
elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
......
@@ -38,6 +38,107 @@ struct swsusp_header {
static struct swsusp_header *swsusp_header;
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
*/
struct swsusp_extent {
struct rb_node node;
unsigned long start;
unsigned long end;
};
static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert(unsigned long swap_offset)
{
struct rb_node **new = &(swsusp_extents.rb_node);
struct rb_node *parent = NULL;
struct swsusp_extent *ext;
/* Figure out where to put the new node */
while (*new) {
ext = container_of(*new, struct swsusp_extent, node);
parent = *new;
if (swap_offset < ext->start) {
/* Try to merge */
if (swap_offset == ext->start - 1) {
ext->start--;
return 0;
}
new = &((*new)->rb_left);
} else if (swap_offset > ext->end) {
/* Try to merge */
if (swap_offset == ext->end + 1) {
ext->end++;
return 0;
}
new = &((*new)->rb_right);
} else {
/* It already is in the tree */
return -EINVAL;
}
}
/* Add the new node and rebalance the tree. */
ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
if (!ext)
return -ENOMEM;
ext->start = swap_offset;
ext->end = swap_offset;
rb_link_node(&ext->node, parent, new);
rb_insert_color(&ext->node, &swsusp_extents);
return 0;
}
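
A concrete walk-through of the merge logic above (offsets invented): inserting 10 creates the extent [10,10]; inserting 11 matches ext->end + 1 and widens it to [10,11]; inserting 9 matches ext->start - 1 and widens it to [9,11]; inserting 10 again falls through to the final branch and returns -EINVAL. Two extents that become adjacent through such widening are not coalesced with each other, which is fine for this use, since free_all_swap_pages() simply walks every extent.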
/**
* alloc_swapdev_block - allocate a swap page and register that it has
* been allocated, so that it can be freed in case of an error.
*/
sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
return 0;
}
/**
* free_all_swap_pages - free swap pages allocated for saving image data.
* It also frees the extents used to register which swap entries had been
* allocated.
*/
void free_all_swap_pages(int swap)
{
struct rb_node *node;
while ((node = swsusp_extents.rb_node)) {
struct swsusp_extent *ext;
unsigned long offset;
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
swap_free(swp_entry(swap, offset));
kfree(ext);
}
}
int swsusp_swap_in_use(void)
{
return (swsusp_extents.rb_node != NULL);
}
/*
* General things
*/
@@ -336,7 +437,7 @@ static int save_image(struct swap_map_handle *handle,
if (ret)
break;
if (!(nr_pages % m))
printk("\b\b\b\b%3d%%", nr_pages / m);
printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
nr_pages++;
}
err2 = wait_on_bio_chain(&bio);
@@ -344,9 +445,9 @@ static int save_image(struct swap_map_handle *handle,
if (!ret)
ret = err2;
if (!ret)
printk("\b\b\b\bdone\n");
printk(KERN_CONT "\b\b\b\bdone\n");
else
printk("\n");
printk(KERN_CONT "\n");
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
return ret;
}
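
The KERN_CONT changes above mark the progress printks as continuations of a line already in progress, so the "%3d%%" updates keep overwriting the tail of the initial KERN_INFO message instead of starting new log lines. A minimal sketch of the pattern, with invented names and message text:

static void foo_show_progress(unsigned int nr_pages, unsigned int m)
{
    unsigned int i;

    printk(KERN_INFO "PM: Writing %u pages ...     ", nr_pages);
    for (i = 0; i < nr_pages; i++) {
        /* ... write page i ... */
        if (!(i % m))
            printk(KERN_CONT "\b\b\b\b%3u%%", i / m);
    }
    printk(KERN_CONT "\b\b\b\bdone\n");
}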
......
@@ -56,133 +56,3 @@
#include "power.h"
int in_suspend __nosavedata = 0;
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
*/
struct swsusp_extent {
struct rb_node node;
unsigned long start;
unsigned long end;
};
static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert(unsigned long swap_offset)
{
struct rb_node **new = &(swsusp_extents.rb_node);
struct rb_node *parent = NULL;
struct swsusp_extent *ext;
/* Figure out where to put the new node */
while (*new) {
ext = container_of(*new, struct swsusp_extent, node);
parent = *new;
if (swap_offset < ext->start) {
/* Try to merge */
if (swap_offset == ext->start - 1) {
ext->start--;
return 0;
}
new = &((*new)->rb_left);
} else if (swap_offset > ext->end) {
/* Try to merge */
if (swap_offset == ext->end + 1) {
ext->end++;
return 0;
}
new = &((*new)->rb_right);
} else {
/* It already is in the tree */
return -EINVAL;
}
}
/* Add the new node and rebalance the tree. */
ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
if (!ext)
return -ENOMEM;
ext->start = swap_offset;
ext->end = swap_offset;
rb_link_node(&ext->node, parent, new);
rb_insert_color(&ext->node, &swsusp_extents);
return 0;
}
/**
* alloc_swapdev_block - allocate a swap page and register that it has
* been allocated, so that it can be freed in case of an error.
*/
sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
return 0;
}
/**
* free_all_swap_pages - free swap pages allocated for saving image data.
* It also frees the extents used to register which swap entries had been
* allocated.
*/
void free_all_swap_pages(int swap)
{
struct rb_node *node;
while ((node = swsusp_extents.rb_node)) {
struct swsusp_extent *ext;
unsigned long offset;
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
swap_free(swp_entry(swap, offset));
kfree(ext);
}
}
int swsusp_swap_in_use(void)
{
return (swsusp_extents.rb_node != NULL);
}
/**
* swsusp_show_speed - print the time elapsed between two events represented by
* @start and @stop
*
* @nr_pages - number of pages processed between @start and @stop
* @msg - introductory message to print
*/
void swsusp_show_speed(struct timeval *start, struct timeval *stop,
unsigned nr_pages, char *msg)
{
s64 elapsed_centisecs64;
int centisecs;
int k;
int kps;
elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
centisecs = elapsed_centisecs64;
if (centisecs == 0)
centisecs = 1; /* avoid div-by-zero */
k = nr_pages * (PAGE_SIZE / 1024);
kps = (k * 100) / centisecs;
printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
msg, k,
centisecs / 100, centisecs % 100,
kps / 1000, (kps % 1000) / 10);
}