/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

34
/*
35
 * The entries in the dpm_list list are in a depth first order, simply
36 37 38
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
39 40
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
41 42 43
 * dpm_list_mutex.
 */

44
LIST_HEAD(dpm_list);
45 46 47
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);
L
Linus Torvalds 已提交
48

49
static DEFINE_MUTEX(dpm_list_mtx);
50
static pm_message_t pm_transition;
L
Linus Torvalds 已提交
51

52 53
static int async_error;

54
/**
55
 * device_pm_init - Initialize the PM-related part of a device object.
56 57 58 59
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
60
	dev->power.is_prepared = false;
61
	dev->power.is_suspended = false;
62
	init_completion(&dev->power.completion);
63
	complete_all(&dev->power.completion);
64 65
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
66
	pm_runtime_init(dev);
67
	INIT_LIST_HEAD(&dev->power.entry);
68 69
}

70
/**
71
 * device_pm_lock - Lock the list of active devices used by the PM core.
72 73 74 75 76 77 78
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
79
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
80 81 82 83 84
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
85

86
/**
87 88
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
89
 */
90
void device_pm_add(struct device *dev)
L
Linus Torvalds 已提交
91 92
{
	pr_debug("PM: Adding info for %s:%s\n",
93
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
94
	mutex_lock(&dpm_list_mtx);
95
	if (dev->parent && dev->parent->power.is_prepared)
96 97
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
98
	list_add_tail(&dev->power.entry, &dpm_list);
99
	mutex_unlock(&dpm_list_mtx);
L
Linus Torvalds 已提交
100 101
}

102
/**
103 104
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
105
 */
106
void device_pm_remove(struct device *dev)
L
Linus Torvalds 已提交
107 108
{
	pr_debug("PM: Removing info for %s:%s\n",
109
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
110
	complete_all(&dev->power.completion);
111
	mutex_lock(&dpm_list_mtx);
L
Linus Torvalds 已提交
112
	list_del_init(&dev->power.entry);
113
	mutex_unlock(&dpm_list_mtx);
114
	device_wakeup_disable(dev);
115
	pm_runtime_remove(dev);
116 117
}

118
/**
119 120 121
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
122 123 124 125
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
126 127
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
128 129 130 131 132
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
133 134 135
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
136 137 138 139
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
140 141
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
142 143 144 145 146
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
147 148
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
149 150 151 152
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
153
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
154 155 156
	list_move_tail(&dev->power.entry, &dpm_list);
}

157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

183 184 185 186 187 188 189 190 191 192
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

193
	if (async || (pm_async_enabled && dev->power.async_suspend))
194 195 196 197 198 199 200 201 202 203 204 205 206 207
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
       device_for_each_child(dev, &async, dpm_wait_fn);
}

208
/**
209 210 211 212
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
213
 */
214 215 216
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
217 218
{
	int error = 0;
219
	ktime_t calltime;
220

221
	calltime = initcall_debug_start(dev);
222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
238
#ifdef CONFIG_HIBERNATE_CALLBACKS
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
265
#endif /* CONFIG_HIBERNATE_CALLBACKS */
266 267 268
	default:
		error = -EINVAL;
	}
269

270
	initcall_debug_report(dev, calltime, error);
271

272 273 274 275
	return error;
}

/**
276 277 278 279
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
280
 *
281 282
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
283
 */
284 285
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
286 287 288
			pm_message_t state)
{
	int error = 0;
289
	ktime_t calltime = ktime_set(0, 0), delta, rettime;
290 291

	if (initcall_debug) {
292 293 294
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
295 296
		calltime = ktime_get();
	}
297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
313
#ifdef CONFIG_HIBERNATE_CALLBACKS
314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
340
#endif /* CONFIG_HIBERNATE_CALLBACKS */
341 342 343
	default:
		error = -EINVAL;
	}
344 345 346 347

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
348 349 350
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
351 352
	}

353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
391
		dev_name(dev), pm_verb(state.event), info, error);
392 393
}

394 395 396
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
397
	u64 usecs64;
398 399 400 401 402 403 404 405 406 407 408 409 410
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

411 412 413
/*------------------------- Resume routines -------------------------*/

/**
414 415 416
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
417
 *
418 419
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
420
 */
421
static int device_resume_noirq(struct device *dev, pm_message_t state)
422 423 424 425 426 427
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

428
	if (dev->pm_domain) {
429
		pm_dev_dbg(dev, state, "EARLY power domain ");
430
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
431
	} else if (dev->type && dev->type->pm) {
432 433
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
434
	} else if (dev->class && dev->class->pm) {
435 436
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
437 438 439
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
440 441
	}

442 443 444 445 446
	TRACE_RESUME(error);
	return error;
}

/**
447 448
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
449
 *
450 451
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
452
 */
453
void dpm_resume_noirq(pm_message_t state)
454
{
455
	ktime_t starttime = ktime_get();
456

457
	mutex_lock(&dpm_list_mtx);
458 459
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
460
		int error;
461 462

		get_device(dev);
463 464
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);
465

466 467 468
		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);
469

470
		mutex_lock(&dpm_list_mtx);
471 472
		put_device(dev);
	}
473
	mutex_unlock(&dpm_list_mtx);
474
	dpm_show_time(starttime, state, "early");
475
	resume_device_irqs();
476
}
477
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
478

479 480
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
R
Randy Dunlap 已提交
481 482
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

499
/**
500
 * device_resume - Execute "resume" callbacks for given device.
501 502
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
503
 * @async: If true, the device is being resumed asynchronously.
504
 */
505
static int device_resume(struct device *dev, pm_message_t state, bool async)
506 507 508 509 510
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);
511

512
	dpm_wait(dev->parent, async);
513
	device_lock(dev);
514

515 516 517 518 519
	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;
520

521 522 523
	if (!dev->power.is_suspended)
		goto Unlock;

524
	if (dev->pm_domain) {
525
		pm_dev_dbg(dev, state, "power domain ");
526
		error = pm_op(dev, &dev->pm_domain->ops, state);
527
		goto End;
528 529
	}

530 531 532 533
	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
534 535
	}

536 537 538 539
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
540
			goto End;
541 542
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
543
			error = legacy_resume(dev, dev->class->resume);
544
			goto End;
545
		}
546
	}
547 548 549 550 551 552 553 554 555 556 557

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

558
 End:
559 560 561
	dev->power.is_suspended = false;

 Unlock:
562
	device_unlock(dev);
563
	complete_all(&dev->power.completion);
564

565 566 567 568
	TRACE_RESUME(error);
	return error;
}

569 570 571 572 573
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

574
	error = device_resume(dev, pm_transition, true);
575 576 577 578 579
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

580
static bool is_async(struct device *dev)
581
{
582 583
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
584 585
}

586
/**
587 588
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
589
 *
590 591
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
592
 */
593
void dpm_resume(pm_message_t state)
594
{
595
	struct device *dev;
596
	ktime_t starttime = ktime_get();
597

598 599
	might_sleep();

600
	mutex_lock(&dpm_list_mtx);
601
	pm_transition = state;
602
	async_error = 0;
603

604
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
605 606 607 608 609 610 611
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

612 613
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
614
		get_device(dev);
615
		if (!is_async(dev)) {
616 617 618 619
			int error;

			mutex_unlock(&dpm_list_mtx);

620
			error = device_resume(dev, state, false);
621 622
			if (error)
				pm_dev_err(dev, state, "", error);
623 624

			mutex_lock(&dpm_list_mtx);
625 626
		}
		if (!list_empty(&dev->power.entry))
627
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
628 629 630
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
631
	async_synchronize_full();
632
	dpm_show_time(starttime, state, NULL);
633 634 635
}

/**
636 637 638
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
639
 */
640
static void device_complete(struct device *dev, pm_message_t state)
641
{
642
	device_lock(dev);
643

644
	if (dev->pm_domain) {
645
		pm_dev_dbg(dev, state, "completing power domain ");
646 647
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
648
	} else if (dev->type && dev->type->pm) {
649
		pm_dev_dbg(dev, state, "completing type ");
650 651 652 653 654 655 656
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
657
		pm_dev_dbg(dev, state, "completing ");
658 659
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
660 661
	}

662
	device_unlock(dev);
663 664 665
}

/**
666 667
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
668
 *
669 670
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
671
 */
672
void dpm_complete(pm_message_t state)
673
{
674 675
	struct list_head list;

676 677
	might_sleep();

678
	INIT_LIST_HEAD(&list);
679
	mutex_lock(&dpm_list_mtx);
680 681
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);
682

683
		get_device(dev);
684
		dev->power.is_prepared = false;
685 686
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);
687

688
		device_complete(dev, state);
689

690
		mutex_lock(&dpm_list_mtx);
691
		put_device(dev);
692
	}
693
	list_splice(&list, &dpm_list);
694 695 696 697
	mutex_unlock(&dpm_list_mtx);
}

/**
698 699
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
700
 *
701 702
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
703
 */
704
void dpm_resume_end(pm_message_t state)
705
{
706 707
	dpm_resume(state);
	dpm_complete(state);
708
}
709
EXPORT_SYMBOL_GPL(dpm_resume_end);
710 711 712 713


/*------------------------- Suspend routines -------------------------*/

714
/**
715 716 717 718 719
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
720 721
 */
static pm_message_t resume_event(pm_message_t sleep_state)
722
{
723 724 725 726 727 728 729 730
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
731
	}
732
	return PMSG_ON;
733 734 735
}

/**
736 737 738
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
739
 *
740 741
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
742
 */
743
static int device_suspend_noirq(struct device *dev, pm_message_t state)
744
{
745
	int error;
746

747
	if (dev->pm_domain) {
748
		pm_dev_dbg(dev, state, "LATE power domain ");
749
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
750 751 752
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
753 754 755
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
756 757 758 759 760 761 762
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
763 764
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
765
		if (error)
766
			return error;
767 768
	}

769
	return 0;
770 771 772
}

/**
773 774
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
775
 *
776 777
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
778
 */
779
int dpm_suspend_noirq(pm_message_t state)
780
{
781
	ktime_t starttime = ktime_get();
782 783
	int error = 0;

784
	suspend_device_irqs();
785
	mutex_lock(&dpm_list_mtx);
786 787
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);
788 789 790 791

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

792
		error = device_suspend_noirq(dev, state);
793 794

		mutex_lock(&dpm_list_mtx);
795
		if (error) {
796
			pm_dev_err(dev, state, " late", error);
797
			put_device(dev);
798 799
			break;
		}
800
		if (!list_empty(&dev->power.entry))
801
			list_move(&dev->power.entry, &dpm_noirq_list);
802
		put_device(dev);
803
	}
804
	mutex_unlock(&dpm_list_mtx);
805
	if (error)
806
		dpm_resume_noirq(resume_event(state));
807 808
	else
		dpm_show_time(starttime, state, "late");
809 810
	return error;
}
811
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
812

813 814
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
R
Randy Dunlap 已提交
815 816 817
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

835
/**
836 837 838
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
839
 * @async: If true, the device is being suspended asynchronously.
840
 */
841
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
842 843 844
{
	int error = 0;

845
	dpm_wait_for_children(dev, async);
846
	device_lock(dev);
847

848
	if (async_error)
849
		goto Unlock;
850

851 852
	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
853
		goto Unlock;
854 855
	}

856
	if (dev->pm_domain) {
857
		pm_dev_dbg(dev, state, "power domain ");
858
		error = pm_op(dev, &dev->pm_domain->ops, state);
859 860 861
		goto End;
	}

862 863 864
	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
865
		goto End;
866 867
	}

868 869 870 871
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
872
			goto End;
873 874
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
875
			error = legacy_suspend(dev, state, dev->class->suspend);
876
			goto End;
877
		}
878 879
	}

880 881 882
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
883
			error = pm_op(dev, dev->bus->pm, state);
884 885
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
886
			error = legacy_suspend(dev, state, dev->bus->suspend);
887
		}
888 889
	}

890
 End:
891 892 893
	dev->power.is_suspended = !error;

 Unlock:
894
	device_unlock(dev);
895
	complete_all(&dev->power.completion);
896

897 898 899
	if (error)
		async_error = error;

900 901 902
	return error;
}

903 904 905 906 907 908
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
909
	if (error)
910 911 912 913 914 915 916 917 918
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

919
	if (pm_async_enabled && dev->power.async_suspend) {
920 921 922 923 924 925 926 927
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

928
/**
929 930
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
931
 */
932
int dpm_suspend(pm_message_t state)
933
{
934
	ktime_t starttime = ktime_get();
935 936
	int error = 0;

937 938
	might_sleep();

939
	mutex_lock(&dpm_list_mtx);
940 941
	pm_transition = state;
	async_error = 0;
942 943
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);
944

945
		get_device(dev);
946
		mutex_unlock(&dpm_list_mtx);
947

948
		error = device_suspend(dev);
949

950
		mutex_lock(&dpm_list_mtx);
951
		if (error) {
952 953
			pm_dev_err(dev, state, "", error);
			put_device(dev);
954 955
			break;
		}
956
		if (!list_empty(&dev->power.entry))
957
			list_move(&dev->power.entry, &dpm_suspended_list);
958
		put_device(dev);
959 960
		if (async_error)
			break;
961 962
	}
	mutex_unlock(&dpm_list_mtx);
963 964 965
	async_synchronize_full();
	if (!error)
		error = async_error;
966 967
	if (!error)
		dpm_show_time(starttime, state, NULL);
968 969 970 971
	return error;
}

/**
972 973 974 975 976 977
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
978
 */
979
static int device_prepare(struct device *dev, pm_message_t state)
980 981 982
{
	int error = 0;

983
	device_lock(dev);
984

985
	if (dev->pm_domain) {
986
		pm_dev_dbg(dev, state, "preparing power domain ");
987 988 989
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
990 991 992
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
993
		pm_dev_dbg(dev, state, "preparing type ");
994 995
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
996 997 998
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
999
	} else if (dev->class && dev->class->pm) {
1000
		pm_dev_dbg(dev, state, "preparing class ");
1001 1002
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
1003
		suspend_report_result(dev->class->pm->prepare, error);
1004 1005
		if (error)
			goto End;
1006 1007 1008 1009 1010
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
1011
	}
1012

1013
 End:
1014
	device_unlock(dev);
1015 1016 1017

	return error;
}
1018

1019
/**
1020 1021
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
1022
 *
1023
 * Execute the ->prepare() callback(s) for all devices.
1024
 */
1025
int dpm_prepare(pm_message_t state)
1026 1027 1028
{
	int error = 0;

1029 1030
	might_sleep();

1031 1032 1033 1034 1035 1036 1037
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

1038
		pm_runtime_get_noresume(dev);
1039 1040 1041
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

1042 1043 1044
		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);
1045 1046 1047 1048 1049

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
S
Sebastian Ott 已提交
1050
				error = 0;
1051 1052
				continue;
			}
1053 1054
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
1055
				dev_name(dev), error);
1056 1057 1058
			put_device(dev);
			break;
		}
1059
		dev->power.is_prepared = true;
1060
		if (!list_empty(&dev->power.entry))
1061
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1062 1063 1064
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
1065 1066 1067
	return error;
}

1068
/**
1069 1070
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
1071
 *
1072 1073
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
1074
 */
1075
int dpm_suspend_start(pm_message_t state)
1076 1077
{
	int error;
1078

1079 1080 1081
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
1082 1083
	return error;
}
1084
EXPORT_SYMBOL_GPL(dpm_suspend_start);
1085 1086 1087

void __suspend_report_result(const char *function, void *fn, int ret)
{
1088 1089
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1090 1091
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1092 1093 1094 1095 1096 1097

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
1098
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1099 1100
{
	dpm_wait(dev, subordinate->power.async_suspend);
1101
	return async_error;
1102 1103
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);