/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
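
/*
 * The lists below track devices through the phases of a system power
 * transition: dpm_prepare() moves entries from dpm_list to
 * dpm_prepared_list, dpm_suspend() to dpm_suspended_list,
 * dpm_suspend_late() to dpm_late_early_list and dpm_suspend_noirq() to
 * dpm_noirq_list; the resume path walks them back in reverse order.
 */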
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

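/*
 * initcall_debug_start() and initcall_debug_report() bracket a device PM
 * callback; when pm_print_times_enabled is set they log which callback is
 * run, in which task, and how long it took.
 */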
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (pm_print_times_enabled) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

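/*
 * dpm_wait_fn() adapts dpm_wait() to the device_for_each_child() iterator
 * so that dpm_wait_for_children() can wait for all children of a device.
 */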
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
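
/*
 * For example, a driver (hypothetical names) providing
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 * gets foo_suspend back from pm_op() for PMSG_SUSPEND and foo_resume for
 * PMSG_RESUME.
 */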

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

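/* Map a PM_EVENT_* code onto the verb used in diagnostic messages. */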
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

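/*
 * async_resume() is the async_schedule() entry point used by dpm_resume()
 * for devices that are allowed to resume asynchronously.
 */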
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
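
/*
 * Note: a device is opted into asynchronous suspend/resume by setting
 * power.async_suspend, normally via device_enable_async_suspend().
 */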

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put_sync(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;
	else if (dev->power.is_suspended)
		__pm_runtime_disable(dev, false);

	return error;
}

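/*
 * async_suspend() is the async_schedule() entry point used by
 * device_suspend() when a device may be suspended asynchronously.
 */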
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);