/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

/* All registered devices, parents before children (see comment above). */
LIST_HEAD(dpm_list);
/* Devices moved here by dpm_prepare() after their ->prepare() has run. */
LIST_HEAD(dpm_prepared_list);
/* Devices moved here by dpm_suspend() after their ->suspend() has run. */
LIST_HEAD(dpm_suspended_list);
/* Devices moved here by dpm_suspend_noirq() after their "late" callbacks. */
LIST_HEAD(dpm_noirq_list);

/* Protects dpm_list and the per-stage lists above. */
static DEFINE_MUTEX(dpm_list_mtx);

/* PM transition currently in progress; read by the async helpers. */
static pm_message_t pm_transition;

/* First error reported by an async suspend thread; aborts the transition. */
static int async_error;

54
/**
55
 * device_pm_init - Initialize the PM-related part of a device object.
56 57 58 59
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
60
	dev->power.is_prepared = false;
61
	dev->power.is_suspended = false;
62
	init_completion(&dev->power.completion);
63
	complete_all(&dev->power.completion);
64 65
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
66
	pm_runtime_init(dev);
67
	INIT_LIST_HEAD(&dev->power.entry);
68 69
}

70
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	/* Serializes access to dpm_list and the per-stage dpm_* lists. */
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	/* Counterpart of device_pm_lock(). */
	mutex_unlock(&dpm_list_mtx);
}
85

86
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/*
	 * Registering a child below a parent that has already run ->prepare()
	 * races with the transition in progress; warn, but proceed.
	 */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	/* Tail insertion keeps children after their parents in dpm_list. */
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

102
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

118
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): no locking here — presumably the caller holds dpm_list_mtx
 * (e.g. via device_pm_lock()); verify at the call sites.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 *
 * NOTE(review): no locking here — presumably the caller holds dpm_list_mtx
 * (e.g. via device_pm_lock()); verify at the call sites.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 *
 * NOTE(review): no locking here — presumably the caller holds dpm_list_mtx;
 * verify at the call sites.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182
/*
 * Log the start of a device callback and return its start timestamp.
 * When initcall_debug is off, the returned value is simply zero.
 */
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (!initcall_debug)
		return calltime;

	pr_info("calling  %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
	return ktime_get();
}

/*
 * Log the result and duration (in usecs, via ns >> 10 approximation) of a
 * device callback started by initcall_debug_start().  No-op unless
 * initcall_debug is set.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t rettime, elapsed;

	if (!initcall_debug)
		return;

	rettime = ktime_get();
	elapsed = ktime_sub(rettime, calltime);
	pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
		error, (unsigned long long)ktime_to_ns(elapsed) >> 10);
}

183 184 185 186 187 188 189 190 191 192
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	bool must_wait;

	if (!dev)
		return;

	/* Only synchronize against devices taking part in async PM. */
	must_wait = async || (pm_async_enabled && dev->power.async_suspend);
	if (must_wait)
		wait_for_completion(&dev->power.completion);
}

/* device_for_each_child() callback: wait for one child, never stop the walk. */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

/* Wait until all children of @dev have completed their PM transitions. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
       device_for_each_child(dev, &async, dpm_wait_fn);
}

208
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * A missing callback for a supported event is not an error (returns 0);
 * an event compiled out or unknown yields -EINVAL.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		/* Events compiled out above also land here. */
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Like pm_op(), but invokes the *_noirq variants of the callbacks and logs
 * its own timing (with the "_i+" tag) instead of using the
 * initcall_debug_start()/initcall_debug_report() helpers.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* NOTE(review): printk without an explicit KERN_ level. */
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

/* Map a PM_EVENT_* code to a human-readable verb for log messages. */
static char *pm_verb(int event)
{
	static const struct {
		int event;
		char *verb;
	} verbs[] = {
		{ PM_EVENT_SUSPEND,	"suspend" },
		{ PM_EVENT_RESUME,	"resume" },
		{ PM_EVENT_FREEZE,	"freeze" },
		{ PM_EVENT_QUIESCE,	"quiesce" },
		{ PM_EVENT_HIBERNATE,	"hibernate" },
		{ PM_EVENT_THAW,	"thaw" },
		{ PM_EVENT_RESTORE,	"restore" },
		{ PM_EVENT_RECOVER,	"recover" },
	};
	int i;

	for (i = 0; i < (int)(sizeof(verbs) / sizeof(verbs[0])); i++)
		if (verbs[i].event == event)
			return verbs[i].verb;

	return "(unknown PM event)";
}

/* Emit a debug message describing the PM callback about to run for @dev. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

/* Report a failed PM callback for @dev at KERN_ERR level. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

394 395 396
/* Log how long the phase that began at @starttime took, in msecs. */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Never report a zero duration. */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

411 412 413
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Exactly one callback source is used, in decreasing order of precedence:
 * power domain, device type, class, bus.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		/* Hold a reference so the device survives the unlocked call. */
		get_device(dev);
		/* Advance the device to the next stage before dropping the lock. */
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		/* Callbacks may sleep; must not run under dpm_list_mtx. */
		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	/* All noirq handlers have run; let drivers see interrupts again. */
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
478

479 480
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 *
 * Wraps @cb with initcall-debug timing and failure reporting.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	ktime_t start = initcall_debug_start(dev);
	int ret = cb(dev);

	suspend_report_result(cb, ret);
	initcall_debug_report(dev, start, ret);

	return ret;
}

499
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Exactly one callback source is used, in decreasing order of precedence:
 * power domain, type, class (new-style then legacy), bus (new-style then
 * legacy).  Runtime PM is re-enabled before running the callback and the
 * matching put is issued at the end.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;	/* true once pm_runtime_enable() has run */

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Parents must finish resuming before their children start. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	/* Nothing to do if the suspend stage never ran for this device. */
	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	/* Wake up any children waiting in dpm_wait() on this device. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

577 578 579 580 581
/*
 * Async-domain entry point: resume one device and drop the reference taken
 * by dpm_resume() when the work was scheduled.
 */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error = device_resume(dev, pm_transition, true);

	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

588
static bool is_async(struct device *dev)
589
{
590 591
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
592 593
}

594
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * First pass: re-arm every completion and kick off the async-capable
	 * devices so they can resume in parallel with the sync walk below.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			/* Callbacks may sleep; drop the list lock around them. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for the async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
644 645 646
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
647
 */
648
static void device_complete(struct device *dev, pm_message_t state)
649
{
650
	device_lock(dev);
651

652
	if (dev->pm_domain) {
653
		pm_dev_dbg(dev, state, "completing power domain ");
654 655
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
656
	} else if (dev->type && dev->type->pm) {
657
		pm_dev_dbg(dev, state, "completing type ");
658 659 660 661 662 663 664
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
665
		pm_dev_dbg(dev, state, "completing ");
666 667
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
668 669
	}

670
	device_unlock(dev);
671 672 673
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		/* Walk from the tail: children complete before parents. */
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		/* Park completed devices on a local list, splice back at the end. */
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Return all completed devices to the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
718 719 720 721


/*------------------------- Suspend routines -------------------------*/

722
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	/* No suspend-side event matched: resume to full power. */
	return PMSG_ON;
}

/**
744 745 746
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
747
 *
748 749
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
750
 */
751
static int device_suspend_noirq(struct device *dev, pm_message_t state)
752
{
753
	int error;
754

755
	if (dev->pm_domain) {
756
		pm_dev_dbg(dev, state, "LATE power domain ");
757
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
758 759 760
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
761 762 763
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
764 765 766 767 768 769 770
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
771 772
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
773
		if (error)
774
			return error;
775 776
	}

777
	return 0;
778 779 780
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		/* Walk from the tail: children are suspended before parents. */
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Unwind: resume everything already suspended at this stage. */
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
820

821 822
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 *
 * Wraps @cb with initcall-debug timing and failure reporting.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	ktime_t start = initcall_debug_start(dev);
	int ret = cb(dev, state);

	suspend_report_result(cb, ret);
	initcall_debug_report(dev, start, ret);

	return ret;
}

843
/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Returns 0 (not an error) when aborting because another async thread has
 * already recorded a failure, or when a wakeup event is pending; the caller
 * is expected to check async_error separately.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	/* Children must be suspended before their parent. */
	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	/* Pin runtime PM: the barrier flushes pending runtime transitions. */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		/* A wakeup event aborts the whole system transition. */
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	/* Remembered by device_resume() to decide whether to run callbacks. */
	dev->power.is_suspended = !error;

	device_unlock(dev);
	/* Unblock the parent waiting in dpm_wait_for_children(). */
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		/* Keep runtime PM disabled (and the reference held) until resume. */
		__pm_runtime_disable(dev, false);
	}

	return error;
}

920 921 922 923 924 925
/*
 * Async-domain entry point: suspend one device and drop the reference taken
 * by device_suspend() when the work was scheduled.
 */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error = __device_suspend(dev, pm_transition, true);

	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

936
	if (pm_async_enabled && dev->power.async_suspend) {
937 938 939 940 941 942 943 944
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

945
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		/* Walk from the tail: children are suspended before parents. */
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		/* device_suspend() may sleep; drop the list lock around it. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop early if an async thread has already failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	/* A synchronous error takes precedence over an async one. */
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	/*
	 * In each branch suspend_report_result() is also reached when the
	 * ->prepare callback is absent; it is silent then since error == 0.
	 */
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}
1035

1036
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		/* device_prepare() may sleep; drop the list lock around it. */
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				/* The device asked to be skipped; try the next one. */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

1079
/**
1080 1081
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
1082
 *
1083 1084
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
1085
 */
1086
int dpm_suspend_start(pm_message_t state)
1087 1088
{
	int error;
1089

1090 1091 1092
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
1093 1094
	return error;
}
1095
EXPORT_SYMBOL_GPL(dpm_suspend_start);
1096 1097 1098

void __suspend_report_result(const char *function, void *fn, int ret)
{
1099 1100
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1101 1102
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1103 1104 1105 1106 1107 1108

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	/* Propagate any error already recorded by the async machinery. */
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);