/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

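/*
 * Illustrative sketch of the rule above (not part of the original file):
 * a device lock may already be held when device_pm_add() runs, so the only
 * safe nesting is
 *
 *	device_lock(dev);
 *	mutex_lock(&dpm_list_mtx);
 *	...
 *	mutex_unlock(&dpm_list_mtx);
 *	device_unlock(dev);
 *
 * Taking a device lock while holding dpm_list_mtx would invert that order
 * and could deadlock against device_pm_add().
 */
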
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
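
/*
 * Illustrative note (not part of the original file): a driver that wants
 * its suspend/resume callbacks run asynchronously, ordered only against
 * its parent and children via dpm_wait()/dpm_wait_for_children() above,
 * can opt in at probe time with
 *
 *	device_enable_async_suspend(dev);
 *
 * which sets the power.async_suspend flag tested here.
 */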

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
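
/*
 * Illustrative sketch (not part of the original file): the callbacks that
 * pm_op() selects from are typically supplied by a driver as, e.g.,
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * which fills in .suspend/.resume and reuses the same pair for the
 * hibernation events (.freeze, .thaw, .poweroff, .restore).
 * foo_suspend()/foo_resume() are hypothetical driver functions.
 */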

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
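
/*
 * Illustrative sketch (not part of the original file): the late/early
 * callbacks selected above come from a driver's dev_pm_ops, e.g.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *	};
 *
 * foo_suspend_late()/foo_resume_early() are hypothetical driver functions.
 */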

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the callback
 * returned by this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
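
/*
 * Illustrative note (not part of the original file): device_resume() and
 * __device_suspend() below use these helpers in the pattern
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *	...
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 *
 * so a callback stuck for longer than CONFIG_DPM_WATCHDOG_TIMEOUT seconds
 * panics the system and leaves a crash dump.
 */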

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
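
/*
 * Informational sketch (not part of the original file): the suspend core
 * pairs this helper with dpm_suspend_end() below, roughly as
 *
 *	dpm_suspend_end(PMSG_SUSPEND);
 *	...enter the sleep state...
 *	dpm_resume_start(PMSG_RESUME);
 *
 * i.e. "late" then "noirq" callbacks on the way down, "noirq" then "early"
 * on the way back up.
 */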

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: String description of the callback, used for debug reporting.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	if (error)
		pm_runtime_put(dev);

	return error;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
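
/*
 * Illustrative usage sketch (not part of the original file): a caller could
 * count the devices currently on dpm_list with something like
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	dpm_for_each_dev(&count, count_dev);
 *
 * count_dev() is a hypothetical helper, shown only for illustration.
 */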