/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
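
/*
 * During a system transition, devices progress through these lists:
 * dpm_list -> dpm_prepared_list (prepare) -> dpm_suspended_list (suspend)
 * -> dpm_late_early_list (suspend_late) -> dpm_noirq_list (suspend_noirq),
 * and back again, in reverse, while resuming.
 */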

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
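
/*
 * For illustration, a driver would normally feed pm_op() through a struct
 * dev_pm_ops such as the sketch below; foo_suspend() and foo_resume() are
 * hypothetical driver functions, not part of this file:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,
 *		.resume   = foo_resume,
 *		.freeze   = foo_suspend,
 *		.thaw     = foo_resume,
 *		.poweroff = foo_suspend,
 *		.restore  = foo_resume,
 *	};
 *
 * SIMPLE_DEV_PM_OPS() in <linux/pm.h> generates a similar structure.
 */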

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
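
/*
 * Typical usage, as in device_resume() and __device_suspend() below:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	...run the device's suspend or resume callback...
 *	dpm_watchdog_clear(&wd);
 *
 * When CONFIG_DPM_WATCHDOG is not set, all three expand to nothing.
 */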

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq" resume callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
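
/*
 * A device opts in to asynchronous suspend/resume with
 * device_enable_async_suspend(), typically when it is registered; the
 * global pm_async_enabled knob is exported as /sys/power/pm_async.
 */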

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of
	 * async threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of
	 * async threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;

	return error;
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
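
/*
 * For orientation, the core suspend code (kernel/power/suspend.c) drives a
 * full transition roughly as:
 *
 *	dpm_suspend_start(PMSG_SUSPEND);	(prepare + suspend)
 *	dpm_suspend_end(PMSG_SUSPEND);		(suspend_late + suspend_noirq)
 *	...the system sleeps...
 *	dpm_resume_start(PMSG_RESUME);		(resume_noirq + resume_early)
 *	dpm_resume_end(PMSG_RESUME);		(resume + complete)
 */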

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Callback description used in diagnostic messages.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	if (error)
		pm_runtime_put(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
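
/*
 * Illustrative use from a driver's own PM callback; "foo" and its
 * companion pointer are hypothetical, not part of this file:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->companion);
 *	}
 */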

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
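
/*
 * Illustrative use; count_dev() is a hypothetical callback, not part of
 * this file:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	dpm_for_each_dev(&count, count_dev);
 */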