/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);
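
/*
 * Devices migrate between these lists during a system transition:
 * dpm_prepare() moves them from dpm_list to dpm_prepared_list,
 * dpm_suspend() on to dpm_suspended_list and dpm_suspend_noirq() on to
 * dpm_noirq_list.  The resume path walks the lists in the opposite
 * direction until every device is back on dpm_list.
 */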

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.in_suspend = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}
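
/*
 * Note: the driver core is expected to call device_pm_init() from
 * device_initialize(), so a device has a valid power.completion and
 * power.lock before it can be registered or take part in a transition.
 */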

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.in_suspend)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
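
/*
 * Note: initcall_debug is the same knob as the "initcall_debug" kernel
 * command line parameter; enabling it also turns on these per-device
 * suspend/resume timing reports.
 */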

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
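
/*
 * Illustrative note: dpm_wait() only blocks when one side of the
 * dependency takes part in asynchronous suspend/resume.  A driver
 * typically opts a device in at probe time with
 *
 *	device_enable_async_suspend(dev);
 *
 * after which the PM core uses power.completion to order the device
 * against its parent and children.
 */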

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
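
/*
 * Illustrative sketch (not part of this file): the callbacks pm_op()
 * dispatches to come from a struct dev_pm_ops supplied by the device's
 * type, class or bus, e.g.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 * where foo_* are hypothetical names.
 */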

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.in_suspend = false;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		pm_op(dev, &dev->pwr_domain->ops, state);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
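
/*
 * Background note: pm_async_enabled is toggled through /sys/power/pm_async,
 * and pm_trace_is_enabled() reflects /sys/power/pm_trace; with PM tracing
 * active everything runs synchronously so the recorded trace of the last
 * suspend/resume step stays meaningful.
 */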

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
		pm_dev_dbg(dev, state, "completing power domain ");
		dev->pwr_domain->ops.complete(dev);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.in_suspend = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
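
/*
 * Usage note (callers live outside this file): the system sleep core is
 * expected to call dpm_suspend_noirq(PMSG_SUSPEND) once dpm_suspend_start()
 * has succeeded, and to undo it with dpm_resume_noirq(PMSG_RESUME) on the
 * way back up.
 */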

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto Domain;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto Domain;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto Domain;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 Domain:
	if (!error && dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		pm_op(dev, &dev->pwr_domain->ops, state);
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}
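
/*
 * Note: recording a failure in async_error (above) is how an asynchronous
 * suspend reports back, since async threads cannot return an error code
 * directly; dpm_suspend() checks async_error and stops queuing further
 * devices.
 */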

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		dev->pwr_domain->ops.prepare(dev);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.in_suspend = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
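
/*
 * Usage sketch (outside this file): a system sleep sequence pairs the
 * entry points exported here, roughly
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		... enter the sleep state ...
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 *
 * which is essentially what the suspend core in kernel/power/suspend.c
 * does.
 */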

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
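
/*
 * Illustrative sketch (hypothetical driver code): a device whose suspend
 * must not run ahead of some other device it depends on can synchronize
 * explicitly from its callback, e.g.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, foo->companion);
 *
 *		if (error)
 *			return error;
 *		return 0;
 *	}
 *
 * where foo and foo->companion are hypothetical.
 */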