/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
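/*
 * Devices are moved from dpm_list onto these per-phase lists as they pass
 * through prepare, suspend, suspend_late and suspend_noirq, and back again
 * during the corresponding resume phases.
 */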
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

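/*
 * Error reported by a device suspend/resume callback; the dpm_*() loops
 * check it and abort the transition in progress when it is set.
 */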
static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

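/*
 * With initcall debugging (pm_print_times) enabled, log the start of a
 * device PM callback and return the timestamp used to report its duration.
 */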
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

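/*
 * Invoke a single PM callback for @dev, if one is set, with trace events
 * and optional timing around the call, and pass the result to
 * suspend_report_result().
 */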
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

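/*
 * A device is handled asynchronously only if it asked for async_suspend,
 * the global pm_async switch is enabled and PM tracing is off (the trace
 * mechanism relies on devices being handled in a deterministic order).
 */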
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the asynchronous resume threads upfront so that they are
	 * not delayed behind the devices that must be resumed synchronously.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the asynchronous resume threads upfront so that they are
	 * not delayed behind the devices that must be resumed synchronously.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

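	/*
	 * Async-capable devices are handed off to an async thread and 0 is
	 * returned right away; any failure is recorded in async_error instead.
	 */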
	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

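	/*
	 * A device marked for direct_complete may be left runtime-suspended.
	 * Disable runtime PM and re-check the status: a concurrent runtime
	 * resume may have woken the device up before runtime PM was disabled,
	 * in which case the full suspend callbacks must run after all.
	 */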
	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the already known devices a chance to complete their probes
	 * before device probing is disabled below.  This synchronization point
	 * matters in particular at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and would
	 * make the system behavior unpredictable, so prohibit device probing
	 * here and defer any new probes instead.  The normal behavior will be
	 * restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

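/*
 * Cache whether @dev has any system sleep PM callbacks at all, so that
 * devices without callbacks can be skipped quickly and allowed to take
 * the direct_complete path.
 */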
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}