/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
126
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
127
	mutex_lock(&dpm_list_mtx);
128
	if (dev->parent && dev->parent->power.is_prepared)
129 130
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
131
	list_add_tail(&dev->power.entry, &dpm_list);
132
	mutex_unlock(&dpm_list_mtx);
L
Linus Torvalds 已提交
133 134
}

135
/**
136 137
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
138
 */
139
void device_pm_remove(struct device *dev)
L
Linus Torvalds 已提交
140 141
{
	pr_debug("PM: Removing info for %s:%s\n",
142
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
143
	complete_all(&dev->power.completion);
144
	mutex_lock(&dpm_list_mtx);
L
Linus Torvalds 已提交
145
	list_del_init(&dev->power.entry);
146
	mutex_unlock(&dpm_list_mtx);
147
	device_wakeup_disable(dev);
148
	pm_runtime_remove(dev);
149 150
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
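
/*
 * For illustration only (not part of the original file): given a
 * hypothetical driver's dev_pm_ops, pm_op() maps the transition event to
 * the matching callback slot, e.g. pm_op(&foo_pm_ops, PMSG_SUSPEND)
 * returns foo_suspend and pm_op(&foo_pm_ops, PMSG_THAW) returns foo_thaw:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_thaw(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.thaw = foo_thaw,
 *	};
 */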

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The returned callback runs with runtime PM disabled for the device.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The returned callback runs while the driver of the device is not
 * receiving interrupts.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
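
/*
 * For illustration only (not part of the original file): a driver can opt a
 * device into asynchronous suspend/resume, which makes is_async() return
 * true whenever /sys/power/pm_async is enabled and PM tracing is off.  A
 * hypothetical probe routine would do:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 */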

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the start of the async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the start of the async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
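
/*
 * For illustration only (not part of the original file): the system sleep
 * core (kernel/power/suspend.c) drives the phases implemented here roughly
 * in this order, with the resume side run in reverse on wakeup:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	[prepare + suspend]
 *	if (!error)
 *		error = dpm_suspend_end(PMSG_SUSPEND);	[late + noirq]
 *	... the platform enters the sleep state and wakes up ...
 *	dpm_resume_start(PMSG_RESUME);			[noirq + early]
 *	dpm_resume_end(PMSG_RESUME);			[resume + complete]
 */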

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of the callback, used for reporting.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
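
/*
 * For illustration only (not part of the original file): a hypothetical
 * legacy bus-level suspend callback has the two-argument signature that
 * legacy_suspend() expects, e.g.
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 *
 * wired up through the bus_type's .suspend field rather than dev_pm_ops.
 */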

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
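
/*
 * For illustration only (not part of the original file): a hypothetical
 * driver opts into the direct_complete optimization by returning a positive
 * value from ->prepare() when its device is already runtime-suspended and
 * may stay suspended across the whole system transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_status_suspended(dev) ? 1 : 0;
 *	}
 */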

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
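
/*
 * For illustration only (not part of the original file): a hypothetical
 * driver whose suspend must run after that of some other device it is not
 * related to by the parent/child hierarchy could serialize on it like this:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->peer_dev);
 *	}
 */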

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
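
/*
 * For illustration only (not part of the original file): a hypothetical
 * caller counting the devices currently on dpm_list:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int n = 0;
 *	dpm_for_each_dev(&n, count_dev);
 */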