/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

34
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

/* Protects all four dpm_* lists above and the devices' power.entry links. */
static DEFINE_MUTEX(dpm_list_mtx);

/* PM transition in progress; set by dpm_suspend() and dpm_resume(). */
static pm_message_t pm_transition;

/* First error returned by an async suspend callback, 0 if none. */
static int async_error;

54
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.in_suspend = false;
	init_completion(&dev->power.completion);
	/* Completed state means "no suspend/resume in progress" to dpm_wait(). */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

68
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
83

84
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/*
	 * Registering a child under a parent that has already been through
	 * dpm_prepare() is suspicious: warn, but still add the device.
	 */
	if (dev->parent && dev->parent->power.in_suspend)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

100
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

116
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
/*
 * initcall_debug_start - Note the start time of a device PM callback.
 * @dev: Device whose callback is about to run.
 *
 * With the initcall_debug boot option set, log the call and return the
 * current time; otherwise return a zero timestamp.
 */
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

/*
 * initcall_debug_report - Log duration and result of a device PM callback.
 * @dev: Device the callback ran for.
 * @calltime: Timestamp returned by initcall_debug_start().
 * @error: Callback's return value.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* >> 10 is a cheap approximation of ns-to-us division. */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

181 182 183 184 185 186 187 188 189 190
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

/* device_for_each_child() callback: wait for one child, never abort the walk. */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

/* Wait until PM operations on all children of @dev have completed. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	/* Body was indented with spaces; normalized to kernel tab style. */
	device_for_each_child(dev, &async, dpm_wait_fn);
}

206
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Returns the callback's result, 0 if no callback is installed for the
 * event, or -EINVAL for an event this kernel was not configured for.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * NOTE(review): the initcall_debug timing below duplicates
 * initcall_debug_start()/initcall_debug_report() with slightly different
 * log text ("parent:" / "initcall ... _i+"); a consolidation would change
 * the log format, so it is left as-is.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

/*
 * pm_verb - Human-readable name for a PM event, for log messages.
 * @event: PM_EVENT_* value.
 *
 * Returns a string literal; const-qualified so callers cannot (UB) write
 * through the returned pointer.
 */
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/* Emit a dev_dbg line for a PM callback; @info is read-only (const-qualified). */
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

/* Log a PM callback failure; @info is read-only (const-qualified). */
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

392 393 394
/*
 * dpm_show_time - Log how long a whole suspend/resume phase took.
 * @starttime: Timestamp taken when the phase began.
 * @state: PM transition of the system being carried out.
 * @info: Optional phase label ("early", "late"), may be NULL; read-only.
 */
static void dpm_show_time(ktime_t starttime, pm_message_t state,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Report at least 1 us so very fast phases don't log "0.000 msecs". */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

409 410 411
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Resume callbacks run bus first, then type, then class. */
	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		/* Hold a reference across the unlocked callback section. */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Drop the list lock while running the device's callback. */
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
482

483 484
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

503
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* The parent must finish resuming before we touch this device. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.in_suspend = false;

	/* Order: bus, then type, then class; new-style pm ops win over legacy. */
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	/* Wake up children (and anyone else) blocked in dpm_wait() on us. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

559 560 561 562 563
/* async_schedule() worker: resume one device, log errors, drop its ref. */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	/* Balances the get_device() done when this work was scheduled. */
	put_device(dev);
}

570
/*
 * May @dev be suspended/resumed asynchronously?  Requires the device flag,
 * the global switch, and PM tracing to be off.
 */
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

576
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off all async-capable resumes up front so they run in parallel. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		/* Async devices are handled by async_resume(); skip them here. */
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		/* Entry may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async resumes before reporting the phase time. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	/* ->complete() runs in reverse callback order: class, type, bus. */
	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.in_suspend = false;
		/* Park the device on a local list while the lock is dropped. */
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Return all processed devices to the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
694 695 696 697


/*------------------------- Suspend routines -------------------------*/

698
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	int event = sleep_state.event;

	if (event == PM_EVENT_SUSPEND)
		return PMSG_RESUME;
	if (event == PM_EVENT_FREEZE || event == PM_EVENT_QUIESCE)
		return PMSG_RECOVER;
	if (event == PM_EVENT_HIBERNATE)
		return PMSG_RESTORE;

	/* Anything else maps to the "fully on" message. */
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	/* Suspend callbacks run class first, then type, then bus. */
	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		/* Walk backwards: children are suspended before parents. */
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* On failure, roll back the devices already suspended. */
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
794

795 796
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

817
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	/* All children must be suspended before their parent. */
	dpm_wait_for_children(dev, async);
	device_lock(dev);

	/* Bail out early if another (async) suspend has already failed. */
	if (async_error)
		goto End;

	/* A pending wakeup event aborts the whole suspend. */
	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	/* Order: class, then type, then bus — the reverse of resume. */
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	device_unlock(dev);
	/* Let the parent's suspend (waiting on its children) proceed. */
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

879 880 881 882 883 884
/* async_schedule() worker: suspend one device, log errors, drop its ref. */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	/* Balances the get_device() in device_suspend(). */
	put_device(dev);
}

/* Suspend one device, asynchronously if it is flagged and async PM is on. */
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		/* Errors are reported later via async_error. */
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

904
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	/* Walk in reverse registration order: children before parents. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop early if an async suspend reported a failure. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	/* ->prepare() runs bus first, then type, then class. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}
985

986
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		/* Keep runtime PM from resuming the device underneath us. */
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			/* -EAGAIN means "skip this device", not "abort". */
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.in_suspend = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

1033
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
1051 1052 1053

/* Backend of suspend_report_result(): log @fn and its non-zero return value. */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1058 1059 1060 1061 1062 1063

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 *
 * Returns the current async_error so the caller can detect a failed
 * asynchronous suspend of @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);