/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
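
/*
 * Ordering of the system sleep phases implemented below (see
 * dpm_suspend_start() and dpm_suspend_end(), and their resume counterparts
 * dpm_resume_start() and dpm_resume_end()):
 *
 *   suspend: dpm_prepare() -> dpm_suspend() -> dpm_suspend_late() -> dpm_suspend_noirq()
 *   resume:  dpm_resume_noirq() -> dpm_resume_early() -> dpm_resume() -> dpm_complete()
 */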

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

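/* Map a PM_EVENT_* code to a human-readable verb for log messages. */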
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

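/*
 * The two helpers below bracket a device callback: when pm_print_times_enabled
 * is set they log the invocation and, afterwards, the return value and an
 * approximate duration.
 */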
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
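/*
 * Per-device watchdog armed around each suspend/resume callback; if the
 * callback does not finish within CONFIG_DPM_WATCHDOG_TIMEOUT seconds the
 * handler panics the system to capture a crash dump.
 */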
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

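/*
 * A device is handled asynchronously only if async suspend is enabled
 * globally (pm_async_enabled), the device has opted in via its
 * power.async_suspend flag, and PM tracing is not active.
 */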
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

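/* async_schedule() callback: "noirq" resume of one device, with error reporting. */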
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

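/* Queue the device for asynchronous "noirq" suspend or handle it synchronously. */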
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);