/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}
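
/*
 * Note (not part of the original file): pm_print_times_enabled is toggled
 * through /sys/power/pm_print_times, so the "calling ..." and
 * "call ... returned" lines above can be enabled at run time to trace the
 * duration of each device's PM callback.
 */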

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
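
/*
 * Example (hypothetical driver code, not from this file): a driver whose
 * device can suspend and resume independently of unrelated devices may opt
 * in to the asynchronous path at probe time with
 *
 *	device_enable_async_suspend(&pdev->dev);
 *
 * in which case dpm_wait() only enforces ordering against that device's
 * parent and children.
 */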

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
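
/*
 * Example (hypothetical driver, not part of this file): pm_op() simply
 * selects the member of a subsystem's or driver's dev_pm_ops that matches
 * the transition.  For a driver declared as
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = &foo_pm_ops,
 *		},
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume.
 */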

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
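
/*
 * For example (illustrative values), with info == NULL and
 * state == PMSG_SUSPEND the pr_info() above prints something like:
 *
 *	PM: suspend of devices complete after 123.456 msecs
 */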

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
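
/*
 * Note (not part of the original file): pm_async_enabled reflects
 * /sys/power/pm_async, so asynchronous suspend/resume can be disabled
 * globally at run time even for devices that have power.async_suspend set.
 */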

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;

	return error;
}

/**
1029
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1030
 * @state: PM transition of the system being carried out.
1031
 *
1032 1033
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
1034
 */
1035
static int dpm_suspend_noirq(pm_message_t state)
1036
{
1037
	ktime_t starttime = ktime_get();
1038 1039
	int error = 0;

1040
	cpuidle_pause();
1041
	suspend_device_irqs();
1042
	mutex_lock(&dpm_list_mtx);
1043 1044
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);
1045 1046 1047 1048

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

1049
		error = device_suspend_noirq(dev, state);
1050 1051

		mutex_lock(&dpm_list_mtx);
1052
		if (error) {
1053
			pm_dev_err(dev, state, " noirq", error);
1054 1055 1056
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
1057
			put_device(dev);
1058 1059
			break;
		}
1060
		if (!list_empty(&dev->power.entry))
1061
			list_move(&dev->power.entry, &dpm_noirq_list);
1062
		put_device(dev);
1063 1064 1065 1066 1067

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
1068
	}
1069
	mutex_unlock(&dpm_list_mtx);
1070
	if (error)
1071
		dpm_resume_noirq(resume_event(state));
1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;

	return error;
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	if (error)
		pm_runtime_put(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
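
/*
 * Example (hypothetical driver code): if device B's resume callback must not
 * run until an unrelated device A (here a_dev) has finished resuming, it can
 * wait for A explicitly:
 *
 *	static int b_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, a_dev);
 *
 *		if (error)
 *			return error;
 *		...
 *	}
 */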

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
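
/*
 * Example (hypothetical): counting the devices currently on dpm_list with
 * dpm_for_each_dev():
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one);
 */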