/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

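/**
 * pm_verb - Return a descriptive name for the given PM event.
 * @event: PM transition event (PM_EVENT_*) to describe.
 *
 * The returned string is used in debug and error messages.
 */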
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

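/**
 * initcall_debug_start - Optionally log the start of a device PM callback.
 * @dev: Device whose callback is about to be run.
 *
 * When pm_print_times is enabled, print which callback is starting and return
 * the current time so initcall_debug_report() can compute the duration;
 * otherwise return a zero ktime.
 */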
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

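/**
 * pm_dev_dbg - Emit a debug message describing a device PM callback.
 * @dev: Device the callback is being run for.
 * @state: PM transition of the system being carried out.
 * @info: Description of the callback.
 */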
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

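/**
 * dpm_run_callback - Run a PM callback for given device, if one is set.
 * @cb: Callback to run (may be NULL).
 * @dev: Device to run the callback for.
 * @state: PM transition of the system being carried out.
 * @info: Description of the callback, used for debug and error reporting.
 */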
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

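/**
 * is_async - Check whether a device may be suspended/resumed asynchronously.
 * @dev: Device to check.
 *
 * Asynchronous handling is used only if the device has async_suspend set,
 * asynchronous PM is globally enabled and PM tracing is not in use.
 */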
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

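/*
 * Asynchronous counterpart of device_resume_noirq(), scheduled from
 * dpm_resume_noirq(); drops the device reference taken when it was scheduled.
 */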
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

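/*
 * Asynchronous counterpart of device_resume_early(), scheduled from
 * dpm_resume_early(); drops the device reference taken when it was scheduled.
 */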
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

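/*
 * Asynchronous counterpart of device_resume(), scheduled from dpm_resume();
 * drops the device reference taken when it was scheduled.
 */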
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

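/*
 * Asynchronous counterpart of __device_suspend_noirq(), scheduled from
 * device_suspend_noirq(); drops the device reference taken when it was
 * scheduled.
 */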
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

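/*
 * Run the "noirq" suspend of a device, asynchronously if that is allowed for
 * it, synchronously otherwise.
 */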
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

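/*
 * Asynchronous counterpart of __device_suspend_late(), scheduled from
 * device_suspend_late(); drops the device reference taken when it was
 * scheduled.
 */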
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

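/*
 * Run the "late" suspend of a device, asynchronously if that is allowed for
 * it, synchronously otherwise.
 */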
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Callback description used when reporting results.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

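/*
 * Asynchronous counterpart of __device_suspend(), scheduled from
 * device_suspend(); drops the device reference taken when it was scheduled.
 */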
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);