main.c 26.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
15 16 17
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
L
Linus Torvalds 已提交
18 19 20
 */

#include <linux/device.h>
21
#include <linux/kallsyms.h>
22
#include <linux/mutex.h>
23
#include <linux/pm.h>
24
#include <linux/pm_runtime.h>
25
#include <linux/resume-trace.h>
26
#include <linux/interrupt.h>
27
#include <linux/sched.h>
28
#include <linux/async.h>
29

30
#include "../base.h"
L
Linus Torvalds 已提交
31 32
#include "power.h"

33
/*
34
 * The entries in the dpm_list list are in a depth first order, simply
35 36 37
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
38 39
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
40 41 42
 * dpm_list_mutex.
 */

43
LIST_HEAD(dpm_list);
L
Linus Torvalds 已提交
44

45
static DEFINE_MUTEX(dpm_list_mtx);
46
static pm_message_t pm_transition;
L
Linus Torvalds 已提交
47

48 49 50 51 52 53
/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

54 55
static int async_error;

56
/**
57
 * device_pm_init - Initialize the PM-related part of a device object.
58 59 60 61 62
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
63
	init_completion(&dev->power.completion);
64
	complete_all(&dev->power.completion);
65 66
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
67 68 69
	pm_runtime_init(dev);
}

70
/**
71
 * device_pm_lock - Lock the list of active devices used by the PM core.
72 73 74 75 76 77 78
 */
void device_pm_lock(void)
{
	/* Serializes all access to dpm_list for code outside this file. */
	mutex_lock(&dpm_list_mtx);
}

/**
79
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
80 81 82 83 84
 */
void device_pm_unlock(void)
{
	/* Counterpart of device_pm_lock(). */
	mutex_unlock(&dpm_list_mtx);
}
85

86
/**
87 88
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
89
 */
90
void device_pm_add(struct device *dev)
L
Linus Torvalds 已提交
91 92
{
	pr_debug("PM: Adding info for %s:%s\n",
93 94
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
95
	mutex_lock(&dpm_list_mtx);
96
	if (dev->parent) {
97 98
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
99
				 dev_name(dev->parent));
100 101 102 103 104 105
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
106
		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
107
	}
108 109

	list_add_tail(&dev->power.entry, &dpm_list);
110
	mutex_unlock(&dpm_list_mtx);
L
Linus Torvalds 已提交
111 112
}

113
/**
114 115
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
116
 */
117
void device_pm_remove(struct device *dev)
L
Linus Torvalds 已提交
118 119
{
	pr_debug("PM: Removing info for %s:%s\n",
120 121
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
122
	complete_all(&dev->power.completion);
123
	mutex_lock(&dpm_list_mtx);
L
Linus Torvalds 已提交
124
	list_del_init(&dev->power.entry);
125
	mutex_unlock(&dpm_list_mtx);
126
	device_wakeup_disable(dev);
127
	pm_runtime_remove(dev);
128 129
}

130
/**
131 132 133
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
134 135 136 137 138 139 140 141 142 143 144 145 146
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	/* dpm_list order determines suspend/resume order, so this changes it. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
147 148 149
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
150 151 152 153 154 155 156 157 158 159 160 161 162
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	/* dpm_list order determines suspend/resume order, so this changes it. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
163 164
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
165 166 167 168 169 170 171 172 173
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	/* Devices at the tail are suspended first / resumed last. */
	list_move_tail(&dev->power.entry, &dpm_list);
}

174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199
/*
 * Log the start of a PM callback for @dev and return its start time.
 * Does nothing (and returns a zero ktime) unless initcall_debug is set.
 */
static ktime_t initcall_debug_start(struct device *dev)
{
	if (!initcall_debug)
		return ktime_set(0, 0);

	pr_info("calling  %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

200 201 202 203 204 205 206 207 208 209
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;
	if (!async && !(pm_async_enabled && dev->power.async_suspend))
		return;

	wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

/* Wait until every child of @dev has completed its PM transition. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

225
/**
226 227 228 229
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
230
 */
231 232 233
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
234 235
{
	int error = 0;
236
	ktime_t calltime;
237

238
	calltime = initcall_debug_start(dev);
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
286

287
	initcall_debug_report(dev, calltime, error);
288

289 290 291 292
	return error;
}

/**
293 294 295 296
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
297
 *
298 299
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
300
 */
301 302
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
303 304 305
			pm_message_t state)
{
	int error = 0;
306 307 308
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
309 310 311
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
312 313
		calltime = ktime_get();
	}
314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
361 362 363 364

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
365 366 367
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
368 369
	}

370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410
	return error;
}

/* Map a PM_EVENT_* code to a human-readable verb for diagnostics. */
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/* Emit a debug message naming the PM phase (@info) being run for @dev. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

/* Report a device callback failure for the given PM phase (@info). */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

411 412 413
/* Print how long the given phase of a PM transition took. */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	u64 usecs64;
	int usecs;

	usecs64 = ktime_to_ns(ktime_sub(ktime_get(), starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;	/* never report "0.000 msecs" */
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

428 429 430
/*------------------------- Resume routines -------------------------*/

/**
431 432 433
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
434
 *
435 436
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
437
 */
438
static int device_resume_noirq(struct device *dev, pm_message_t state)
439 440 441 442 443 444
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

445
	if (dev->bus && dev->bus->pm) {
446 447
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
448 449
		if (error)
			goto End;
450
	}
451

452 453 454 455 456 457 458 459 460 461 462 463 464
	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
465 466 467 468 469
	TRACE_RESUME(error);
	return error;
}

/**
470 471
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
472
 *
473 474
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
475
 */
476
void dpm_resume_noirq(pm_message_t state)
477
{
478
	struct device *dev;
479
	ktime_t starttime = ktime_get();
480

481
	mutex_lock(&dpm_list_mtx);
482
	transition_started = false;
483 484 485
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;
486

487
			dev->power.status = DPM_OFF;
488
			error = device_resume_noirq(dev, state);
489 490 491
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
492
	mutex_unlock(&dpm_list_mtx);
493
	dpm_show_time(starttime, state, "early");
494
	resume_device_irqs();
495
}
496
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
497

498 499
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	ktime_t calltime = initcall_debug_start(dev);
	int error;

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

518
/**
519
 * device_resume - Execute "resume" callbacks for given device.
520 521
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
522
 * @async: If true, the device is being resumed asynchronously.
523
 */
524
static int device_resume(struct device *dev, pm_message_t state, bool async)
525 526 527 528 529
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);
530

531
	dpm_wait(dev->parent, async);
532
	device_lock(dev);
533

534 535
	dev->power.status = DPM_RESUMING;

536 537 538
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
539
			error = pm_op(dev, dev->bus->pm, state);
540 541
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
542
			error = legacy_resume(dev, dev->bus->resume);
543 544 545
		}
		if (error)
			goto End;
546 547
	}

548 549 550 551 552 553 554
	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
555 556
	}

557 558 559 560 561 562
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
563
			error = legacy_resume(dev, dev->class->resume);
564
		}
565
	}
566
 End:
567
	device_unlock(dev);
568
	complete_all(&dev->power.completion);
569

570 571 572 573
	TRACE_RESUME(error);
	return error;
}

574 575 576 577 578
/* async_schedule() thunk: resume one device on an async thread. */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);	/* balances get_device() at schedule time */
}

585
static bool is_async(struct device *dev)
586
{
587 588
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
589 590
}

591
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head done;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&done);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* First pass: re-arm completions and kick off async resumes. */
	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously. */
	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			/* Drop the lock around the callback; the device may
			 * register children or take its own locks. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &done);
		put_device(dev);
	}
	list_splice(&done, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
648 649 650
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
651
 */
652
static void device_complete(struct device *dev, pm_message_t state)
653
{
654
	device_lock(dev);
655 656 657 658 659 660 661 662 663 664 665

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

666
	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
667
		pm_dev_dbg(dev, state, "completing ");
668
		dev->bus->pm->complete(dev);
669 670
	}

671
	device_unlock(dev);
672 673 674
}

/**
675 676
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
677
 *
678 679
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
680
 */
681
static void dpm_complete(pm_message_t state)
682
{
683 684 685
	struct list_head list;

	INIT_LIST_HEAD(&list);
686
	mutex_lock(&dpm_list_mtx);
R
Romit Dasgupta 已提交
687
	transition_started = false;
688 689
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);
690

691 692 693 694 695
		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

696
			device_complete(dev, state);
697
			pm_runtime_put_sync(dev);
698 699 700 701 702 703

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
704
	}
705
	list_splice(&list, &dpm_list);
706 707 708 709
	mutex_unlock(&dpm_list_mtx);
}

/**
710 711
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
712
 *
713 714
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
715
 */
716
void dpm_resume_end(pm_message_t state)
717
{
718
	might_sleep();
719 720
	dpm_resume(state);
	dpm_complete(state);
721
}
722
EXPORT_SYMBOL_GPL(dpm_resume_end);
723 724 725 726


/*------------------------- Suspend routines -------------------------*/

727
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	/* Anything else rolls back to fully "on". */
	return PMSG_ON;
}

/**
749 750 751
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
752
 *
753 754
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
755
 */
756
static int device_suspend_noirq(struct device *dev, pm_message_t state)
757 758
{
	int error = 0;
759

760 761 762 763 764 765 766 767 768 769 770 771 772 773
	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

774
	if (dev->bus && dev->bus->pm) {
775 776
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
777
	}
778 779

End:
780 781 782 783
	return error;
}

/**
784 785
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
786
 *
787 788
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
789
 */
790
int dpm_suspend_noirq(pm_message_t state)
791
{
792
	struct device *dev;
793
	ktime_t starttime = ktime_get();
794 795
	int error = 0;

796
	suspend_device_irqs();
797
	mutex_lock(&dpm_list_mtx);
798
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
799
		error = device_suspend_noirq(dev, state);
800
		if (error) {
801
			pm_dev_err(dev, state, " late", error);
802 803
			break;
		}
804
		dev->power.status = DPM_OFF_IRQ;
805
	}
806
	mutex_unlock(&dpm_list_mtx);
807
	if (error)
808
		dpm_resume_noirq(resume_event(state));
809 810
	else
		dpm_show_time(starttime, state, "late");
811 812
	return error;
}
813
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
814

815 816
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	ktime_t calltime = initcall_debug_start(dev);
	int error;

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

837
/**
838 839 840
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
841
 * @async: If true, the device is being suspended asynchronously.
842
 */
843
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
844 845 846
{
	int error = 0;

847
	dpm_wait_for_children(dev, async);
848
	device_lock(dev);
849

850 851 852
	if (async_error)
		goto End;

853 854 855 856 857 858
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
859
			error = legacy_suspend(dev, state, dev->class->suspend);
860 861 862
		}
		if (error)
			goto End;
863 864
	}

865 866 867 868 869 870 871
	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
872 873
	}

874 875 876
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
877
			error = pm_op(dev, dev->bus->pm, state);
878 879
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
880
			error = legacy_suspend(dev, state, dev->bus->suspend);
881
		}
882
	}
883 884 885 886

	if (!error)
		dev->power.status = DPM_OFF;

887
 End:
888
	device_unlock(dev);
889
	complete_all(&dev->power.completion);
890

891 892 893
	if (error)
		async_error = error;

894 895 896
	return error;
}

897 898 899 900 901 902
/* async_schedule() thunk: suspend one device on an async thread. */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);	/* balances get_device() in device_suspend() */
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

913
	if (pm_async_enabled && dev->power.async_suspend) {
914 915 916 917 918 919 920 921
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

922
/**
923 924
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
925
 */
926
static int dpm_suspend(pm_message_t state)
927
{
928
	struct list_head list;
929
	ktime_t starttime = ktime_get();
930 931
	int error = 0;

932
	INIT_LIST_HEAD(&list);
933
	mutex_lock(&dpm_list_mtx);
934 935
	pm_transition = state;
	async_error = 0;
936 937
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);
938

939
		get_device(dev);
940
		mutex_unlock(&dpm_list_mtx);
941

942
		error = device_suspend(dev);
943

944
		mutex_lock(&dpm_list_mtx);
945
		if (error) {
946 947
			pm_dev_err(dev, state, "", error);
			put_device(dev);
948 949
			break;
		}
950
		if (!list_empty(&dev->power.entry))
951 952
			list_move(&dev->power.entry, &list);
		put_device(dev);
953 954
		if (async_error)
			break;
955
	}
956
	list_splice(&list, dpm_list.prev);
957
	mutex_unlock(&dpm_list_mtx);
958 959 960
	async_synchronize_full();
	if (!error)
		error = async_error;
961 962
	if (!error)
		dpm_show_time(starttime, state, NULL);
963 964 965 966
	return error;
}

/**
967 968 969 970 971 972
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
973
 */
974
static int device_prepare(struct device *dev, pm_message_t state)
975 976 977
{
	int error = 0;

978
	device_lock(dev);
979

980
	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
981
		pm_dev_dbg(dev, state, "preparing ");
982 983
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
1002
	device_unlock(dev);
1003 1004 1005

	return error;
}
1006

1007
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head done;
	int error = 0;

	INIT_LIST_HEAD(&done);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		/* Drop the lock around the callbacks. */
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				/* -EAGAIN means "skip me", not "abort". */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &done);
		put_device(dev);
	}
	list_splice(&done, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

1061
/**
1062 1063
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
1064
 *
1065 1066
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
1067
 */
1068
int dpm_suspend_start(pm_message_t state)
1069 1070
{
	int error;
1071

1072
	might_sleep();
1073 1074 1075
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
1076 1077
	return error;
}
1078
EXPORT_SYMBOL_GPL(dpm_suspend_start);
1079 1080 1081

void __suspend_report_result(const char *function, void *fn, int ret)
{
1082 1083
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1084 1085
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1086 1087 1088 1089 1090 1091

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
1092
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1093 1094
{
	dpm_wait(dev, subordinate->power.async_suspend);
1095
	return async_error;
1096 1097
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);