/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
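
/*
 * During a system sleep transition devices move between the lists above:
 * dpm_prepare() takes them from dpm_list to dpm_prepared_list, dpm_suspend()
 * moves them on to dpm_suspended_list, dpm_suspend_late() to
 * dpm_late_early_list and dpm_suspend_noirq() to dpm_noirq_list.  The resume
 * path walks the lists in the opposite direction, and dpm_complete() finally
 * splices the devices back onto dpm_list.
 */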

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

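/*
 * Note: pm_print_times_enabled is set via /sys/power/pm_print_times (and
 * defaults on when booting with initcall_debug); the two helpers above then
 * bracket each callback with "calling"/"call ... returned" messages in the
 * style of initcall debugging.
 */
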
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

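/*
 * Ordering note: on suspend each device waits for its children (see
 * dpm_wait_for_children() in __device_suspend() below), while on resume it
 * waits for its parent (dpm_wait(dev->parent, ...)), so child-before-parent
 * suspend and parent-before-child resume hold even when callbacks run
 * asynchronously.
 */
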
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

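/*
 * Illustrative example: for a (hypothetical) driver declaring
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume.
 */
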
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the callback
 * returned by this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
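
/*
 * Illustrative usage of the watchdog helpers above, following the pattern
 * used by device_resume() and __device_suspend() below:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 */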

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
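
/*
 * Note: pm_async_enabled is toggled through /sys/power/pm_async and a
 * device's power.async_suspend flag through its power/async sysfs attribute;
 * async handling is also skipped while PM tracing (pm_trace) is enabled.
 */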

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case their start is delayed
	 * by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/* Drop the runtime PM reference taken in device_prepare(). */
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
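
/*
 * For example, if freezing devices for hibernation fails midway, the suspend
 * path unwinds with resume_event(PMSG_FREEZE) == PMSG_RECOVER, so that the
 * drivers' thaw callbacks are run (see pm_op() above).
 */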

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;

	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;

	return error;
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
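
/*
 * The exported entry points pair up the individual phases: dpm_suspend_start()
 * is dpm_prepare() plus dpm_suspend(), dpm_suspend_end() is dpm_suspend_late()
 * plus dpm_suspend_noirq(), dpm_resume_start() is dpm_resume_noirq() plus
 * dpm_resume_early(), and dpm_resume_end() is dpm_resume() plus dpm_complete().
 */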

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: String printed in debug and trace messages.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	if (error)
		pm_runtime_put(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
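
/*
 * Illustrative example (hypothetical foo driver): a ->suspend() callback
 * that must not run until a companion device has been suspended can do
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *		...
 *	}
 */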

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
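
/*
 * Illustrative usage (hypothetical count_dev() callback) counting the
 * devices currently on dpm_list:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, count_dev);
 */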