/* linux/suspend.h - system sleep (suspend/hibernate) state definitions */
#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <linux/freezer.h>

#include <asm/errno.h>

#ifdef CONFIG_VT
extern void pm_set_vt_switch(int);
#else
/* Without VT support the VT-switch setting is a no-op. */
static inline void pm_set_vt_switch(int do_switch)
{
}
#endif
19

20
#ifdef CONFIG_VT_CONSOLE_SLEEP
21
extern void pm_prepare_console(void);
22 23
extern void pm_restore_console(void);
#else
24
static inline void pm_prepare_console(void)
25 26 27 28 29 30
{
}

static inline void pm_restore_console(void)
{
}
31 32 33 34 35
#endif

/*
 * System sleep states.  PM_SUSPEND_ON means "not asleep"; the remaining
 * values name progressively deeper sleep states, with PM_SUSPEND_MIN/MAX
 * bounding the range of real states.
 */
typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_TO_IDLE	((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MIN		PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)

/*
 * Phases of a suspend/resume transition, used (via dpm_save_failed_step())
 * to record at which step a failure occurred.
 */
enum suspend_stat_step {
	SUSPEND_FREEZE = 1,
	SUSPEND_PREPARE,
	SUSPEND_SUSPEND,
	SUSPEND_SUSPEND_LATE,
	SUSPEND_SUSPEND_NOIRQ,
	SUSPEND_RESUME_NOIRQ,
	SUSPEND_RESUME_EARLY,
	SUSPEND_RESUME
};

/*
 * Cumulative suspend statistics.  The failed_devs/errno/failed_steps arrays
 * are ring buffers holding the REC_FAILED_NUM most recent failures; each
 * last_failed_* field is the next write index into its ring (see the
 * dpm_save_failed_*() helpers).
 */
struct suspend_stats {
	int	success;
	int	fail;
	int	failed_freeze;
	int	failed_prepare;
	int	failed_suspend;
	int	failed_suspend_late;
	int	failed_suspend_noirq;
	int	failed_resume;
	int	failed_resume_early;
	int	failed_resume_noirq;
#define	REC_FAILED_NUM	2
	int	last_failed_dev;
	char	failed_devs[REC_FAILED_NUM][40];
	int	last_failed_errno;
	int	errno[REC_FAILED_NUM];
	int	last_failed_step;
	enum suspend_stat_step	failed_steps[REC_FAILED_NUM];
};

extern struct suspend_stats suspend_stats;

/*
 * Record the name of a device that failed to suspend in the stats ring
 * buffer (REC_FAILED_NUM entries deep, oldest entry overwritten first).
 */
static inline void dpm_save_failed_dev(const char *name)
{
	/*
	 * strscpy() rather than the deprecated strlcpy(): strlcpy() always
	 * walks the full source string to compute its return value, which can
	 * read far past the destination size; strscpy() stops at the buffer
	 * limit and still guarantees NUL-termination.
	 */
	strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
		name,
		sizeof(suspend_stats.failed_devs[0]));
	suspend_stats.last_failed_dev++;
	suspend_stats.last_failed_dev %= REC_FAILED_NUM;
}

/* Record the error code of the most recent suspend failure in the ring. */
static inline void dpm_save_failed_errno(int err)
{
	int slot = suspend_stats.last_failed_errno;

	suspend_stats.errno[slot] = err;
	suspend_stats.last_failed_errno = (slot + 1) % REC_FAILED_NUM;
}

/* Record the suspend step at which the most recent failure occurred. */
static inline void dpm_save_failed_step(enum suspend_stat_step step)
{
	int slot = suspend_stats.last_failed_step;

	suspend_stats.failed_steps[slot] = step;
	suspend_stats.last_failed_step = (slot + 1) % REC_FAILED_NUM;
}

/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *	system sleep states.
 *
 * @valid: Callback to determine if given system sleep state is supported by
 *	the platform.
 *	Valid (ie. supported) states are advertised in /sys/power/state.  Note
 *	that it still may be impossible to enter given system sleep state if the
 *	conditions aren't right.
 *	There is the %suspend_valid_only_mem function available that can be
 *	assigned to this if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to given system sleep state.
 *	@begin() is executed right prior to suspending devices.  The information
 *	conveyed to the platform code by @begin() should be disregarded by it as
 *	soon as @end() is executed.  If @begin() fails (ie. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional.  However, if it is implemented, the argument
 *	passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @begin().
 *	@prepare() is called right after devices have been suspended (ie. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before device drivers' late suspend callbacks are executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@prepare_late(), @enter(),
 *	and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *	state indicated by @begin().
 *	@prepare_late is called before disabling nonboot CPUs and after
 *	device drivers' late suspend callbacks have been executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@enter() will not be
 *	executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *	the argument if @begin() is not implemented.
 *	This callback is mandatory.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before device drivers' early
 *	resume callbacks are executed.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare_late().  If implemented, it is always called
 *	after @prepare_late and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *	@finish is called right prior to calling device drivers' regular suspend
 *	callbacks.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare().  If implemented, it is always called after
 *	@enter() and @wake(), even if any of them fails.  It is executed after
 *	a failing @prepare.
 *
 * @suspend_again: Returns whether the system should suspend again (true) or
 *	not (false). If the platform wants to poll sensors or execute some
 *	code during suspended without invoking userspace and most of devices,
 *	suspend_again callback is the place assuming that periodic-wakeup or
 *	alarm-wakeup is already setup. This allows to execute some codes while
 *	being kept suspended in the view of userland and devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state or
 *	the transition to the sleep state has been aborted.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @begin().  Accordingly, platforms implementing @begin()
 *	should also provide a @end() which cleans up transitions aborted before
 *	@enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *	Called by the PM core if the suspending of devices fails.
 *	This callback is optional and should only be implemented by platforms
 *	which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
	int (*valid)(suspend_state_t state);
	int (*begin)(suspend_state_t state);
	int (*prepare)(void);
	int (*prepare_late)(void);
	int (*enter)(suspend_state_t state);
	void (*wake)(void);
	void (*finish)(void);
	bool (*suspend_again)(void);
	void (*end)(void);
	void (*recover)(void);
};

/* Platform callbacks for the suspend-to-idle ("freeze") sleep state. */
struct platform_freeze_ops {
	int (*begin)(void);
	int (*prepare)(void);
	void (*wake)(void);
	void (*sync)(void);
	void (*restore)(void);
	void (*end)(void);
};

#ifdef CONFIG_SUSPEND
/* Currently selected and default memory sleep states. */
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;

/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);

extern unsigned int pm_suspend_global_flags;

#define PM_SUSPEND_FLAG_FW_SUSPEND	(1 << 0)
#define PM_SUSPEND_FLAG_FW_RESUME	(1 << 1)

/* Reset all "handled by firmware" suspend flags. */
static inline void pm_suspend_clear_flags(void)
{
	pm_suspend_global_flags = 0;
}

/* Mark the suspend transition as carried out via firmware. */
static inline void pm_set_suspend_via_firmware(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND;
}

/* Mark the resume transition as carried out via firmware. */
static inline void pm_set_resume_via_firmware(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
}

/* True if the suspend-via-firmware flag is set. */
static inline bool pm_suspend_via_firmware(void)
{
	return (pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND) != 0;
}

/* True if the resume-via-firmware flag is set. */
static inline bool pm_resume_via_firmware(void)
{
	return (pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME) != 0;
}

/* Suspend-to-idle state machine. */
enum freeze_state {
	FREEZE_STATE_NONE,      /* Not suspended/suspending. */
	FREEZE_STATE_ENTER,     /* Enter suspend-to-idle. */
	FREEZE_STATE_WAKE,      /* Wake up from suspend-to-idle. */
};

extern enum freeze_state __read_mostly suspend_freeze_state;

/* True while the system is in the "entering suspend-to-idle" phase. */
static inline bool idle_should_freeze(void)
{
	return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
}

extern void __init pm_states_init(void);
extern void freeze_set_ops(const struct platform_freeze_ops *ops);
extern void freeze_wake(void);

/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);

extern int pm_suspend(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem	NULL

/* Without CONFIG_SUSPEND these helpers are no-ops or report failure. */
static inline void pm_suspend_clear_flags(void) {}
static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }

static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool idle_should_freeze(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
static inline void freeze_wake(void) {}
#endif /* !CONFIG_SUSPEND */

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;
};

/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @set_target() has been left.
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails.  This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(void);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);
};

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
/* Register a memory region that must not be saved in the hibernation image. */
static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 0);
}
/* Late variant of register_nosave_region() (passes km = 1 — see snapshot.c). */
static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 1);
}
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
extern bool hibernation_available(void);
asmlinkage int swsusp_save(void);
extern struct pbe *restore_pblist;
#else /* CONFIG_HIBERNATION */
/* Without CONFIG_HIBERNATION these become no-ops or report failure. */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
static inline bool hibernation_available(void) { return false; }
#endif /* CONFIG_HIBERNATION */

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

/* Serializes system sleep transitions; taken by lock_system_sleep(). */
extern struct mutex pm_mutex;

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);

/*
 * Define a static notifier block for @fn (with priority @pri) and register
 * it on the PM notifier chain.
 */
#define pm_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);			\
}

/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;

extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
extern void pm_wakeup_clear(bool reset);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);

/*
 * Take pm_mutex for a system sleep transition.  PF_FREEZER_SKIP is set
 * before blocking on the mutex, presumably so the freezer does not wait
 * for a task that is merely queued behind pm_mutex (see the freezer
 * interplay explained in unlock_system_sleep() below) — NOTE(review):
 * confirm against kernel/freezer.c.
 */
static inline void lock_system_sleep(void)
{
	current->flags |= PF_FREEZER_SKIP;
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	/*
	 * Don't use freezer_count() because we don't want the call to
	 * try_to_freeze() here.
	 *
	 * Reason:
	 * Fundamentally, we just don't need it, because freezing condition
	 * doesn't come into effect until we release the pm_mutex lock,
	 * since the freezer always works with pm_mutex held.
	 *
	 * More importantly, in the case of hibernation,
	 * unlock_system_sleep() gets called in snapshot_read() and
	 * snapshot_write() when the freezing condition is still in effect.
	 * Which means, if we use try_to_freeze() here, it would make them
	 * enter the refrigerator, thus causing hibernation to lockup.
	 */
	current->flags &= ~PF_FREEZER_SKIP;
	mutex_unlock(&pm_mutex);
}

#else /* !CONFIG_PM_SLEEP */

/* Without CONFIG_PM_SLEEP notifier (un)registration trivially succeeds. */
static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}

static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
#else
#define pm_print_times_enabled	(false)

#include <linux/printk.h>

/* Compiles away but still type-checks the format string and arguments. */
#define __pm_pr_dbg(defer, fmt, ...) \
	no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

/* PM debug message, printed immediately. */
#define pm_pr_dbg(fmt, ...) \
	__pm_pr_dbg(false, fmt, ##__VA_ARGS__)

/* PM debug message with the defer flag set. */
#define pm_deferred_pr_dbg(fmt, ...) \
	__pm_pr_dbg(true, fmt, ##__VA_ARGS__)

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
void queue_up_suspend_work(void);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline void queue_up_suspend_work(void) {}

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*
 * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
 * to save/restore additional information to/from the array of page
 * frame numbers in the hibernation image. For s390 this is used to
 * save and restore the storage key for each page that is included
 * in the hibernation image.
 */
unsigned long page_key_additional_pages(unsigned long pages);
int page_key_alloc(unsigned long pages);
void page_key_free(void);
void page_key_read(unsigned long *pfn);
void page_key_memorize(unsigned long *pfn);
void page_key_write(void *address);

#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

/* No per-page key data: these stubs report zero additional pages. */
static inline unsigned long page_key_additional_pages(unsigned long pages)
{
	return 0;
}

static inline int  page_key_alloc(unsigned long pages)
{
	return 0;
}

static inline void page_key_free(void) {}
static inline void page_key_read(unsigned long *pfn) {}
static inline void page_key_memorize(unsigned long *pfn) {}
static inline void page_key_write(void *address) {}

#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

#endif /* _LINUX_SUSPEND_H */