/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded dev_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
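
/*
 * Example (hypothetical driver, not part of this file): pm_op() dispatches
 * on state.event, so a driver only needs to fill in the dev_pm_ops slots it
 * actually implements, e.g.:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 * A missing callback is simply skipped and treated as success.
 */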

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

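	/*
	 * Callbacks are looked up in a fixed order: PM domain first, then
	 * device type, then class, then bus; the first layer that supplies
	 * dev_pm_ops wins.
	 */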
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
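		/*
		 * Move the device to dpm_suspended_list and drop dpm_list_mtx
		 * before running the callback, so the callback never executes
		 * with the list lock held.
		 */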
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

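	/*
	 * Undo the __pm_runtime_disable() done at suspend time; the matching
	 * pm_runtime_put_sync() for the reference taken in __device_suspend()
	 * happens at the end of this function (see "put").
	 */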
	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
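
/*
 * Note: drivers opt in to asynchronous handling with
 * device_enable_async_suspend(), which sets power.async_suspend; user space
 * can veto it globally through /sys/power/pm_async (pm_async_enabled), and
 * async is also skipped while PM tracing is enabled.
 */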

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

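	/*
	 * First pass: kick off every device marked for asynchronous resume;
	 * the loop below then resumes the remaining devices synchronously,
	 * in list order.
	 */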
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
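	/*
	 * Process devices from the tail of dpm_prepared_list, i.e. in the
	 * reverse of the order in which they were prepared.
	 */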
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
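
/*
 * Sketch of the typical call sequence over a full system sleep transition
 * (roughly how the PM core in kernel/power/ drives the functions here):
 *
 *	dpm_suspend_start(PMSG_SUSPEND);   - prepare + suspend callbacks
 *	dpm_suspend_noirq(PMSG_SUSPEND);   - "late" callbacks, IRQs disabled
 *	    ... the system sleeps ...
 *	dpm_resume_noirq(PMSG_RESUME);     - "early" callbacks, IRQs restored
 *	dpm_resume_end(PMSG_RESUME);       - resume + complete callbacks
 */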


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

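	/*
	 * pm_runtime_barrier() above has flushed any pending runtime PM
	 * requests; if a system wakeup event was signaled in the meantime,
	 * abort the suspend.
	 */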
	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path && dev->parent)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
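	/*
	 * Suspend from the tail of dpm_prepared_list, so that children
	 * (which follow their parents in the list) are suspended before
	 * their parents.
	 */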
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
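				/* -EAGAIN is not fatal: skip this device and keep going. */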
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);