/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
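
/*
 * Illustrative sketch, not part of this file: pm_op() dispatches into a
 * struct dev_pm_ops table supplied by the device's PM domain, type, class
 * or bus.  A hypothetical provider (the foo_* names are assumptions) would
 * fill in the slots that the switch above selects between:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,	// PM_EVENT_SUSPEND
 *		.resume   = foo_resume,		// PM_EVENT_RESUME
 *		.freeze   = foo_freeze,		// PM_EVENT_FREEZE/QUIESCE
 *		.poweroff = foo_poweroff,	// PM_EVENT_HIBERNATE
 *		.thaw     = foo_thaw,		// PM_EVENT_THAW/RECOVER
 *		.restore  = foo_restore,	// PM_EVENT_RESTORE
 *	};
 */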

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices in dpm_prepared_list
 * (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
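
/*
 * Illustrative sketch, not part of this file: the platform suspend core
 * (kernel/power/suspend.c) drives the entry points exported here roughly in
 * the following order, with the noirq phase nested inside the outer one
 * (error handling in the real code is more involved than shown):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	if (!error) {
 *		error = dpm_suspend_noirq(PMSG_SUSPEND);	// late/noirq
 *		if (!error) {
 *			// ... the system enters the sleep state here ...
 *			dpm_resume_noirq(PMSG_RESUME);		// early/noirq
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 */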

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
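
/*
 * Illustrative sketch, not part of this file: a driver whose device must not
 * suspend before some other device has finished suspending can call
 * device_pm_wait_for_dev() from its suspend callback.  This only matters on
 * the async path; the synchronous list walk in dpm_suspend() already orders
 * devices.  foo_suspend, foo->consumer and foo_do_suspend are hypothetical
 * names:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		// wait until the device that depends on us has suspended
 *		error = device_pm_wait_for_dev(dev, foo->consumer);
 *		if (error)
 *			return error;
 *
 *		return foo_do_suspend(foo);
 *	}
 */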