/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
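
/*
 * Note that the completion above is initialized in the "completed" state, so
 * dpm_wait() will not block on a device that has not yet taken part in a
 * power transition.
 */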

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (pm_print_times_enabled) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
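
/*
 * Resume paths wait on a device's parent via dpm_wait(), while suspend paths
 * wait on its children via dpm_wait_for_children(), so asynchronous callbacks
 * still observe the parent-before-child resume and child-before-parent
 * suspend ordering.
 */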

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
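
/*
 * Illustrative example (not part of this file): a driver whose dev_pm_ops
 * fields are picked up by pm_op() for the suspend/resume events could be
 * declared as
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * where foo_suspend() and foo_resume() are hypothetical driver callbacks.
 */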

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
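
/*
 * Note: async suspend/resume is not used while PM trace is enabled, since
 * the trace mechanism relies on devices being handled one at a time so that
 * a hang can be attributed to a specific device.
 */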

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put_sync(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
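
/*
 * The complete resume sequence is therefore "noirq" -> "early" callbacks
 * (dpm_resume_start()) followed by "resume" -> "complete" callbacks
 * (dpm_resume_end()), the reverse of the suspend sequence below.
 */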


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

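/*
 * Unlike the dev_pm_ops callbacks selected by pm_op(), the legacy bus and
 * class suspend callbacks invoked below take the pm_message_t argument
 * directly.
 */
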
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
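
/*
 * Illustrative example (not part of this file): a caller could count the
 * devices on dpm_list with a hypothetical callback like
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 * and then invoke dpm_for_each_dev(&count, foo_count_dev) with an int count
 * initialized to 0.
 */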