/*
 *  pm.h - Power management interface
 *
 *  Copyright (C) 2000 Andrew Henroid
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/completion.h>

/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

/*
 * Device power management
 */

struct device;

#ifdef CONFIG_PM
extern const char power_group_name[];		/* = "power" */
#else
#define power_group_name	NULL
#endif

typedef struct pm_message {
	int event;
} pm_message_t;

/**
 * struct dev_pm_ops - device PM callbacks
 *
 * Several device power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state.  There may also be
 * internal transitions to various low-power modes which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * The externally visible transitions are handled with the help of callbacks
 * included in this structure in such a way that two levels of callbacks are
 * involved.  First, the PM core executes callbacks provided by PM domains,
 * device types, classes and bus types.  They are the subsystem-level callbacks
 * supposed to execute callbacks provided by device drivers, although they may
 * choose not to do that.  If the driver callbacks are executed, they have to
 * collaborate with the subsystem-level callbacks to achieve the goals
 * appropriate for the given system transition, given transition phase and the
 * subsystem the device belongs to.
 *
 * @prepare: The principal role of this callback is to prevent new children of
 *	the device from being registered after it has returned (the driver's
 *	subsystem and, more generally, the rest of the kernel are supposed to
 *	prevent new calls to the probe method from being made once @prepare() has
 *	succeeded).  If @prepare() detects a situation it cannot handle (e.g.
 *	registration of a child already in progress), it may return -EAGAIN, so
 *	that the PM core can execute it once again (e.g. after a new child has
 *	been registered) to recover from the race condition.
 *	This method is executed for all kinds of suspend transitions and is
 *	followed by one of the suspend callbacks: @suspend(), @freeze(), or
 *	@poweroff().  The PM core executes subsystem-level @prepare() for all
 *	devices before starting to invoke suspend callbacks for any of them, so
 *	generally devices may be assumed to be functional or to respond to
 *	runtime resume requests while @prepare() is being executed.  However,
 *	device drivers may NOT assume anything about the availability of user
 *	space at that time and it is NOT valid to request firmware from within
 *	@prepare() (it's too late to do that).  It also is NOT valid to allocate
 *	substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
 *	[To work around these limitations, drivers may register suspend and
 *	hibernation notifiers to be executed before the freezing of tasks.]
 *
 * @complete: Undo the changes made by @prepare().  This method is executed for
 *	all kinds of resume transitions, following one of the resume callbacks:
 *	@resume(), @thaw(), @restore().  Also called if the state transition
 *	fails before the driver's suspend callback: @suspend(), @freeze() or
 *	@poweroff(), can be executed (e.g. if the suspend callback fails for one
 *	of the other devices that the PM core has unsuccessfully attempted to
 *	suspend earlier).
 *	The PM core executes subsystem-level @complete() after it has executed
 *	the appropriate resume callbacks for all devices.
 *
 * @suspend: Executed before putting the system into a sleep state in which the
 *	contents of main memory are preserved.  The exact action to perform
 *	depends on the device's subsystem (PM domain, device type, class or bus
 *	type), but generally the device must be quiescent after subsystem-level
 *	@suspend() has returned, so that it doesn't do any I/O or DMA.
 *	Subsystem-level @suspend() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @suspend_late: Continue operations started by @suspend().  For a number of
 *	devices @suspend_late() may point to the same callback routine as the
 *	runtime suspend callback.
 *
 * @resume: Executed after waking the system up from a sleep state in which the
 *	contents of main memory were preserved.  The exact action to perform
 *	depends on the device's subsystem, but generally the driver is expected
 *	to start working again, responding to hardware events and software
 *	requests (the device itself may be left in a low-power state, waiting
 *	for a runtime resume to occur).  The state of the device at the time its
 *	driver's @resume() callback is run depends on the platform and subsystem
 *	the device belongs to.  On most platforms, there are no restrictions on
 *	availability of resources like clocks during @resume().
 *	Subsystem-level @resume() is executed for all devices after invoking
 *	subsystem-level @resume_noirq() for all of them.
 *
 * @resume_early: Prepare to execute @resume().  For a number of devices
 *	@resume_early() may point to the same callback routine as the runtime
 *	resume callback.
 *
 * @freeze: Hibernation-specific, executed before creating a hibernation image.
 *	Analogous to @suspend(), but it should not enable the device to signal
 *	wakeup events or change its power state.  The majority of subsystems
 *	(with the notable exception of the PCI bus type) expect the driver-level
 *	@freeze() to save the device settings in memory to be used by @restore()
 *	during the subsequent resume from hibernation.
 *	Subsystem-level @freeze() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @freeze_late: Continue operations started by @freeze().  Analogous to
 *	@suspend_late(), but it should not enable the device to signal wakeup
 *	events or change its power state.
 *
 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
 *	if the creation of an image has failed.  Also executed after a failing
 *	attempt to restore the contents of main memory from such an image.
 *	Undo the changes made by the preceding @freeze(), so the device can be
 *	operated in the same way as immediately before the call to @freeze().
 *	Subsystem-level @thaw() is executed for all devices after invoking
 *	subsystem-level @thaw_noirq() for all of them.  It also may be executed
 *	directly after @freeze() in case of a transition error.
 *
 * @thaw_early: Prepare to execute @thaw().  Undo the changes made by the
 *	preceding @freeze_late().
 *
 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
 *	Analogous to @suspend(), but it need not save the device's settings in
 *	memory.
 *	Subsystem-level @poweroff() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @poweroff_late: Continue operations started by @poweroff().  Analogous to
 *	@suspend_late(), but it need not save the device's settings in memory.
 *
 * @restore: Hibernation-specific, executed after restoring the contents of main
 *	memory from a hibernation image, analogous to @resume().
 *
 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
 *
 * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
 *	additional operations required for suspending the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @suspend_noirq() is being executed.
 *	It generally is expected that the device will be in a low-power state
 *	(appropriate for the target system sleep state) after subsystem-level
 *	@suspend_noirq() has returned successfully.  If the device can generate
 *	system wakeup signals and is enabled to wake up the system, it should be
 *	configured to do so at that time.  However, depending on the platform
 *	and device's subsystem, @suspend() or @suspend_late() may be allowed to
 *	put the device into the low-power state and configure it to generate
 *	wakeup signals, in which case it generally is not necessary to define
 *	@suspend_noirq().
 *
 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
 *	operations required for resuming the device that might be racing with
 *	its driver's interrupt handler, which is guaranteed not to run while
 *	@resume_noirq() is being executed.
 *
 * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
 *	additional operations required for freezing the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @freeze_noirq() is being executed.
 *	The power state of the device should not be changed by either @freeze(),
 *	or @freeze_late(), or @freeze_noirq() and it should not be configured to
 *	signal system wakeup by any of these callbacks.
 *
 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@thaw_noirq() is being executed.
 *
 * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
 *	@suspend_noirq(), but it need not save the device's settings in memory.
 *
 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@restore_noirq() is being executed.  Analogous to @resume_noirq().
 *
 * All of the above callbacks, except for @complete(), return error codes.
 * However, the error codes returned by the resume operations, @resume(),
 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
 * not cause the PM core to abort the resume transition during which they are
 * returned.  The error codes returned in those cases are only printed by the PM
 * core to the system logs for debugging purposes.  Still, it is recommended
 * that drivers only return error codes from their resume methods in case of an
 * unrecoverable failure (i.e. when the device being handled refuses to resume
 * and becomes unusable) to allow us to modify the PM core in the future, so
 * that it can avoid attempting to handle devices that failed to resume and
 * their children.
 *
 * It is allowed to unregister devices while the above callbacks are being
 * executed.  However, a callback routine must NOT try to unregister the device
 * it was called for, although it may unregister children of that device (for
 * example, if it detects that a child was unplugged while the system was
 * asleep).
 *
 * Refer to Documentation/power/devices.txt for more information about the role
 * of the above callbacks in the system suspend process.
 *
 * There also are callbacks related to runtime power management of devices.
 * Again, these callbacks are executed by the PM core only for subsystems
 * (PM domains, device types, classes and bus types) and the subsystem-level
 * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
 * actions to be performed by a device driver's callbacks generally depend on
 * the platform and subsystem the device belongs to.
 *
 * @runtime_suspend: Prepare the device for a condition in which it won't be
 *	able to communicate with the CPU(s) and RAM due to power management.
 *	This need not mean that the device should be put into a low-power state.
 *	For example, if the device is behind a link which is about to be turned
 *	off, the device may remain at full power.  If the device does go to low
 *	power and is capable of generating runtime wakeup events, remote wakeup
 *	(i.e., a hardware mechanism allowing the device to request a change of
 *	its power state via an interrupt) should be enabled for it.
 *
 * @runtime_resume: Put the device into the fully active state in response to a
 *	wakeup event generated by hardware or at the request of software.  If
 *	necessary, put the device into the full-power state and restore its
 *	registers, so that it is fully operational.
 *
 * @runtime_idle: Device appears to be inactive and it might be put into a
 *	low-power state if all of the necessary conditions are satisfied.  Check
 *	these conditions and handle the device as appropriate, possibly queueing
 *	a suspend request for it.  The return value is ignored by the PM core.
 *
 * Refer to Documentation/power/runtime_pm.txt for more information about the
 * role of the above callbacks in device runtime power management.
 *
 */

struct dev_pm_ops {
	int (*prepare)(struct device *dev);
	void (*complete)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
	int (*suspend_late)(struct device *dev);
	int (*resume_early)(struct device *dev);
	int (*freeze_late)(struct device *dev);
	int (*thaw_early)(struct device *dev);
	int (*poweroff_late)(struct device *dev);
	int (*restore_early)(struct device *dev);
	int (*suspend_noirq)(struct device *dev);
	int (*resume_noirq)(struct device *dev);
	int (*freeze_noirq)(struct device *dev);
	int (*thaw_noirq)(struct device *dev);
	int (*poweroff_noirq)(struct device *dev);
	int (*restore_noirq)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_idle)(struct device *dev);
};

#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
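
/*
 * A driver that needs different callbacks for system sleep and for runtime PM
 * can combine the two helpers above directly when defining its dev_pm_ops.
 * A minimal sketch (the "foo" callbacks are hypothetical, not part of this
 * interface):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */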

/*
 * Use this if you want to use the same suspend and resume callbacks for suspend
 * to RAM and hibernation.
 */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
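
/*
 * Example (a sketch only; the "foo" driver and its callbacks are
 * hypothetical):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the device, save state as needed
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// re-enable the device, restore saved state
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * The resulting foo_pm_ops object is typically pointed to by the driver's
 * .driver.pm field, so the same pair of callbacks serves both suspend to RAM
 * and the hibernation transitions.
 */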

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
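
/*
 * Example (a sketch; the "bar" names are hypothetical):
 *
 *	static UNIVERSAL_DEV_PM_OPS(bar_pm_ops, bar_suspend, bar_resume, NULL);
 *
 * Here bar_suspend() and bar_resume() are assumed to be suitable both for
 * system sleep and for runtime PM of the device, and no runtime idle callback
 * is provided.
 */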

/**
 * PM_EVENT_ messages
 *
 * The following PM_EVENT_ messages are defined for the internal use of the PM
 * core, in order to provide a mechanism allowing the high level suspend and
 * hibernation code to convey the necessary information to the device PM core
 * code:
 *
 * ON		No transition.
 *
 * FREEZE 	System is going to hibernate, call ->prepare() and ->freeze()
 *		for all devices.
 *
 * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
 *		for all devices.
 *
 * HIBERNATE	Hibernation image has been saved, call ->prepare() and
 *		->poweroff() for all devices.
 *
 * QUIESCE	Contents of main memory are going to be restored from a (loaded)
 *		hibernation image, call ->prepare() and ->freeze() for all
 *		devices.
 *
 * RESUME	System is resuming, call ->resume() and ->complete() for all
 *		devices.
 *
 * THAW		Hibernation image has been created, call ->thaw() and
 *		->complete() for all devices.
 *
 * RESTORE	Contents of main memory have been restored from a hibernation
 *		image, call ->restore() and ->complete() for all devices.
 *
 * RECOVER	Creation of a hibernation image or restoration of the main
 *		memory contents from a hibernation image has failed, call
 *		->thaw() and ->complete() for all devices.
 *
 * The following PM_EVENT_ messages are defined for internal use by
 * kernel subsystems.  They are never issued by the PM core.
 *
 * USER_SUSPEND		Manual selective suspend was issued by userspace.
 *
 * USER_RESUME		Manual selective resume was issued by userspace.
 *
 * REMOTE_WAKEUP	Remote-wakeup request was received from the device.
 *
 * AUTO_SUSPEND		Automatic (device idle) runtime suspend was
 *			initiated by the subsystem.
 *
 * AUTO_RESUME		Automatic (device needed) runtime resume was
 *			requested by a driver.
 */

#define PM_EVENT_INVALID	(-1)
#define PM_EVENT_ON		0x0000
#define PM_EVENT_FREEZE 	0x0001
#define PM_EVENT_SUSPEND	0x0002
#define PM_EVENT_HIBERNATE	0x0004
#define PM_EVENT_QUIESCE	0x0008
#define PM_EVENT_RESUME		0x0010
#define PM_EVENT_THAW		0x0020
#define PM_EVENT_RESTORE	0x0040
#define PM_EVENT_RECOVER	0x0080
#define PM_EVENT_USER		0x0100
#define PM_EVENT_REMOTE		0x0200
#define PM_EVENT_AUTO		0x0400

#define PM_EVENT_SLEEP		(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND	(PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME	(PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME	(PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND	(PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME	(PM_EVENT_AUTO | PM_EVENT_RESUME)

#define PMSG_INVALID	((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW	((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE	((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER	((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_RESUME, })

#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)
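
/*
 * PMSG_IS_AUTO() lets code that receives a pm_message_t (for example, a
 * subsystem forwarding a suspend request to its drivers) distinguish runtime
 * (automatic) transitions from system-wide ones.  A sketch only, using the
 * legacy ->suspend() style of callback (the "foo" name is hypothetical):
 *
 *	static int foo_do_suspend(struct device *dev, pm_message_t msg)
 *	{
 *		if (PMSG_IS_AUTO(msg))
 *			;	// runtime suspend requested for an idle device
 *		else
 *			;	// system sleep or hibernation transition
 *		return 0;
 *	}
 */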

/**
 * Device run-time power management status.
 *
 * These status labels are used internally by the PM core to indicate the
 * current status of a device with respect to the PM core operations.  They do
 * not reflect the actual power state of the device or its status as seen by the
 * driver.
 *
 * RPM_ACTIVE		Device is fully operational.  Indicates that the device
 *			bus type's ->runtime_resume() callback has completed
 *			successfully.
 *
 * RPM_SUSPENDED	Device bus type's ->runtime_suspend() callback has
 *			completed successfully.  The device is regarded as
 *			suspended.
 *
 * RPM_RESUMING		Device bus type's ->runtime_resume() callback is being
 *			executed.
 *
 * RPM_SUSPENDING	Device bus type's ->runtime_suspend() callback is being
 *			executed.
 */

enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};

/**
 * Device run-time power management request types.
 *
 * RPM_REQ_NONE		Do nothing.
 *
 * RPM_REQ_IDLE		Run the device bus type's ->runtime_idle() callback
 *
 * RPM_REQ_SUSPEND	Run the device bus type's ->runtime_suspend() callback
 *
 * RPM_REQ_AUTOSUSPEND	Same as RPM_REQ_SUSPEND, but not until the device has
 *			been inactive for as long as power.autosuspend_delay
 *
 * RPM_REQ_RESUME	Run the device bus type's ->runtime_resume() callback
 */

enum rpm_request {
	RPM_REQ_NONE = 0,
	RPM_REQ_IDLE,
	RPM_REQ_SUSPEND,
	RPM_REQ_AUTOSUSPEND,
	RPM_REQ_RESUME,
};

struct wakeup_source;

struct pm_domain_data {
	struct list_head list_node;
	struct device *dev;
};

struct pm_subsys_data {
	spinlock_t lock;
	unsigned int refcount;
#ifdef CONFIG_PM_CLK
	struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
	struct pm_domain_data *domain_data;
#endif
};

struct dev_pm_info {
	pm_message_t		power_state;
	unsigned int		can_wakeup:1;
	unsigned int		async_suspend:1;
	bool			is_prepared:1;	/* Owned by the PM core */
	bool			is_suspended:1;	/* Ditto */
	bool			ignore_children:1;
	spinlock_t		lock;
#ifdef CONFIG_PM_SLEEP
	struct list_head	entry;
	struct completion	completion;
	struct wakeup_source	*wakeup;
	bool			wakeup_path:1;
#else
	unsigned int		should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
	struct timer_list	suspend_timer;
	unsigned long		timer_expires;
	struct work_struct	work;
	wait_queue_head_t	wait_queue;
	atomic_t		usage_count;
	atomic_t		child_count;
	unsigned int		disable_depth:3;
	unsigned int		idle_notification:1;
	unsigned int		request_pending:1;
	unsigned int		deferred_resume:1;
	unsigned int		run_wake:1;
	unsigned int		runtime_auto:1;
	unsigned int		no_callbacks:1;
	unsigned int		irq_safe:1;
	unsigned int		use_autosuspend:1;
	unsigned int		timer_autosuspends:1;
	enum rpm_request	request;
	enum rpm_status		runtime_status;
	int			runtime_error;
	int			autosuspend_delay;
	unsigned long		last_busy;
	unsigned long		active_jiffies;
	unsigned long		suspended_jiffies;
	unsigned long		accounting_timestamp;
	ktime_t			suspend_time;
	s64			max_time_suspended_ns;
#endif
	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
	struct pm_qos_constraints *constraints;
};

extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern int dev_pm_put_subsys_data(struct device *dev);

/*
 * Power domains provide callbacks that are executed during system suspend,
 * hibernation, system resume and during runtime PM transitions along with
 * subsystem-level and driver-level callbacks.
 */
struct dev_pm_domain {
	struct dev_pm_ops	ops;
};

/*
 * The PM_EVENT_ messages are also used by drivers implementing the legacy
 * suspend framework, based on the ->suspend() and ->resume() callbacks common
 * for suspend and hibernation transitions, according to the rules below.
 */

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 * 		and software requests.  The hardware may have gone through
 * 		a power-off reset, or it may have maintained state from the
 * 		previous suspend() which the driver will rely on while
 * 		resuming.  On most platforms, there are no restrictions on
 * 		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend().  All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.)  Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 * 		the upcoming system state (such as PCI_D3hot), and enable
 * 		wakeup events as appropriate.
 *
 * HIBERNATE	Enter a low power device state appropriate for the hibernation
 * 		state (eg. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 * 		but do NOT otherwise enter a low power device state, and do
 * 		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 * 		the system from a snapshot taken after an earlier FREEZE.
 * 		Some drivers will need to reset their hardware state instead
 * 		of preserving it, to ensure that it's never mistaken for the
 * 		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY.  They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
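
/*
 * A legacy (non-dev_pm_ops) driver typically switches on the message event in
 * its ->suspend() callback.  A minimal sketch (the "foo" name is
 * hypothetical):
 *
 *	static int foo_suspend(struct device *dev, pm_message_t state)
 *	{
 *		switch (state.event) {
 *		case PM_EVENT_SUSPEND:
 *		case PM_EVENT_HIBERNATE:
 *			break;	// quiesce and enter a low-power state
 *		case PM_EVENT_FREEZE:
 *		case PM_EVENT_PRETHAW:
 *			break;	// quiesce only, stay at full power
 *		}
 *		return 0;
 *	}
 */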

#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);

extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);

extern void __suspend_report_result(const char *function, void *fn, int ret);

#define suspend_report_result(fn, ret)					\
	do {								\
		__suspend_report_result(__func__, fn, ret);		\
	} while (0)
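
/*
 * suspend_report_result() is normally called by subsystem code right after a
 * legacy suspend callback, so that a failing callback can be identified in
 * the kernel log.  A sketch (the drv/dev/state variables are assumed to be in
 * scope):
 *
 *	if (drv->suspend) {
 *		error = drv->suspend(dev, state);
 *		suspend_report_result(drv->suspend, error);
 *	}
 */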

extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);

extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)

static inline int dpm_suspend_start(pm_message_t state)
{
	return 0;
}

#define suspend_report_result(fn, ret)		do {} while (0)

static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
	return 0;
}

#define pm_generic_prepare	NULL
#define pm_generic_suspend	NULL
#define pm_generic_resume	NULL
#define pm_generic_freeze	NULL
#define pm_generic_thaw		NULL
#define pm_generic_restore	NULL
#define pm_generic_poweroff	NULL
#define pm_generic_complete	NULL
#endif /* !CONFIG_PM_SLEEP */

/* How to reorder dpm_list after device_move() */
enum dpm_order {
	DPM_ORDER_NONE,
	DPM_ORDER_DEV_AFTER_PARENT,
	DPM_ORDER_PARENT_BEFORE_DEV,
	DPM_ORDER_DEV_LAST,
};
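
/*
 * These values are meant to be passed to device_move() (declared in
 * <linux/device.h>) to tell the PM core where to place the moved device in
 * dpm_list, for instance (a sketch):
 *
 *	error = device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
 */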

#endif /* _LINUX_PM_H */