main.c 25.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
15 16 17
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
L
Linus Torvalds 已提交
18 19 20
 */

#include <linux/device.h>
21
#include <linux/kallsyms.h>
22
#include <linux/mutex.h>
23
#include <linux/pm.h>
24
#include <linux/pm_runtime.h>
25
#include <linux/resume-trace.h>
26
#include <linux/interrupt.h>
27
#include <linux/sched.h>
28
#include <linux/async.h>
29
#include <linux/suspend.h>
30

31
#include "../base.h"
L
Linus Torvalds 已提交
32 33
#include "power.h"

34
/*
35
 * The entries in the dpm_list list are in a depth first order, simply
36 37 38
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
39 40
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
41 42 43
 * dpm_list_mutex.
 */

44
/*
 * All devices known to the PM core live on dpm_list; during a system
 * transition they migrate through the phase-specific lists below
 * (prepared -> suspended -> noirq) and back.
 */
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

/* Protects all of the lists above. */
static DEFINE_MUTEX(dpm_list_mtx);
/* PM transition currently in progress; read by the async helpers. */
static pm_message_t pm_transition;

/* First error reported by an async suspend thread; 0 if none so far. */
static int async_error;

54
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.in_suspend = false;
	init_completion(&dev->power.completion);
	/* Start in the "complete" state so nobody blocks waiting on a
	 * device that has never been suspended. */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

68
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
83

84
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/* Registering a child while the parent is suspending is a bug in
	 * the caller; warn but still add the device. */
	if (dev->parent && dev->parent->power.in_suspend)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

100
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

116
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
/*
 * Log the start of a PM callback for @dev when initcall_debug is set and
 * return the timestamp to pass to initcall_debug_report() afterwards.
 * Returns a zero ktime when debugging is disabled.
 */
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t start = ktime_set(0, 0);

	if (!initcall_debug)
		return start;

	pr_info("calling  %s+ @ %i\n", dev_name(dev), task_pid_nr(current));
	return ktime_get();
}

/*
 * Counterpart of initcall_debug_start(): log how long the callback took
 * and what it returned.  No-op unless initcall_debug is set.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t rettime, delta;

	if (!initcall_debug)
		return;

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	/* >> 10 is a cheap approximate ns -> us conversion. */
	pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
		error, (unsigned long long)ktime_to_ns(delta) >> 10);
}

181 182 183 184 185 186 187 188 189 190
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

/* device_for_each_child() adapter for dpm_wait(); always continues iteration. */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

/* Wait until all children of @dev have finished their PM transitions. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

206
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Maps @state's event onto the matching dev_pm_ops callback; missing
 * callbacks are treated as success, unknown events as -EINVAL.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* >> 10 is a cheap approximate ns -> us conversion. */
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

/* Return a human-readable name for a PM event, for log messages. */
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/* Debug-log the PM callback about to run for @dev, with @info as a prefix. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

/* Report a failed PM callback for @dev at KERN_ERR level. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

392 393 394
/* Log how long the PM phase that began at @starttime took, in msecs. */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Never print "0.000 msecs" for a phase that did run. */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

409 410 411
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Callbacks run in bus -> type -> class order; the first error aborts the
 * sequence.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		/* Hold a reference and drop the list lock around the
		 * callback; the device cannot go away meanwhile. */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
482

483 484
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	ktime_t start = initcall_debug_start(dev);
	int ret = cb(dev);

	suspend_report_result(cb, ret);
	initcall_debug_report(dev, start, ret);

	return ret;
}

503
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Waits for the parent to resume first, then runs callbacks in
 * bus -> type -> class order, falling back to the legacy bus/class
 * resume methods where no dev_pm_ops are provided.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.in_suspend = false;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	/* Wake up anything waiting for this device's resume (children). */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

559 560 561 562 563
/* async_schedule() thread body: resume one device and drop its reference. */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

570
/* True if @dev should be handled asynchronously during this transition.
 * PM tracing forces everything synchronous to keep the trace ordered. */
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

576
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off all async-capable devices first so they can resume in
	 * parallel with the synchronous pass below. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		/* The callback may have removed the device from the list. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * ->complete() callbacks run in class -> type -> bus order, the reverse
 * of the ->prepare() order.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.in_suspend = false;
		/* Park on a private list so concurrent list changes don't
		 * make us revisit the same device. */
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);
		/* Drop the runtime-PM reference taken in dpm_prepare(). */
		pm_runtime_put_sync(dev);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
695 696 697 698


/*------------------------- Suspend routines -------------------------*/

699
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Callbacks run in class -> type -> bus order, the reverse of the
 * noirq resume order; the first error aborts the sequence.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		/* Walk backwards: children before parents. */
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* On failure, roll back the devices already suspended. */
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
795

796 797
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	ktime_t start = initcall_debug_start(dev);
	int ret = cb(dev, state);

	suspend_report_result(cb, ret);
	initcall_debug_report(dev, start, ret);

	return ret;
}

818
/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Waits for all children to suspend first, then runs callbacks in
 * class -> type -> bus order (reverse of resume), falling back to the
 * legacy suspend methods where no dev_pm_ops are provided.  Bails out
 * early if another async thread already failed or a wakeup is pending.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	device_unlock(dev);
	/* Let the parent (waiting in dpm_wait_for_children()) proceed. */
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

880 881 882 883 884 885
/* async_schedule() thread body: suspend one device and drop its reference. */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/* Suspend @dev, asynchronously when enabled for it; otherwise inline.
 * The async path always returns 0 — errors surface via async_error. */
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

905
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		/* Walk backwards: children before parents. */
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop queueing work once an async thread has failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 *
 * Callbacks run in bus -> type -> class order; the first error aborts the
 * sequence.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}
986

987
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		/* Block runtime suspend for the whole system transition;
		 * the matching put is in dpm_complete(). */
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		if (pm_wakeup_pending()) {
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				/* Driver asked to be skipped this time. */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.in_suspend = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

1037
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
1055 1056 1057

/* Backend of suspend_report_result(): log which callback @fn in @function
 * failed with @ret.  Silent on success. */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1062 1063 1064 1065 1066 1067

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);