/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

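/*
 * pm_verb - Return a human-readable name for the given PM event, for use in
 * diagnostic messages.
 */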
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
180 181
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

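/*
 * initcall_debug_start - If PM callback timing (pm_print_times) is enabled,
 * log the start of a device callback and return the current time so that
 * initcall_debug_report() can later print the callback's duration and result.
 */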
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

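/*
 * dpm_show_time - Log how long the given phase of the transition took,
 * measured from @starttime.
 */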
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

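/*
 * dpm_run_callback - Invoke a device PM callback, if one is set, with
 * tracing, optional timing output and error reporting.
 */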
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

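/*
 * is_async - Whether the device is to be handled in an async thread: its
 * power.async_suspend flag is set, pm_async is enabled and PM tracing is
 * not active.
 */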
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

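/*
 * async_resume_early - async_schedule() callback: run the "early resume"
 * phase for one device and drop the reference taken when it was scheduled.
 */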
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

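/*
 * async_resume - async_schedule() callback: resume one device asynchronously
 * and drop the reference taken when it was scheduled.
 */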
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		trace_device_pm_callback_start(dev, info, state.event);
		callback(dev);
		trace_device_pm_callback_end(dev, 0);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

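/*
 * async_suspend_noirq - async_schedule() callback: run the "noirq suspend"
 * phase for one device and drop the reference taken when it was scheduled.
 */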
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

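/*
 * async_suspend_late - async_schedule() callback: run the "late suspend"
 * phase for one device and drop the reference taken when it was scheduled.
 */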
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

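/*
 * async_suspend - async_schedule() callback: suspend one device
 * asynchronously and drop the reference taken when it was scheduled.
 */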
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		trace_device_pm_callback_start(dev, info, state.event);
		ret = callback(dev);
		trace_device_pm_callback_end(dev, ret);
	}

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);