/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.in_suspend = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.in_suspend)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
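
/*
 * Example (illustrative sketch, not part of the original file): pm_op()
 * dispatches to whichever dev_pm_ops table the device's bus, type or
 * class provides.  A driver-side table for this kernel version could
 * look like the following; the foo_* handlers are hypothetical.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.freeze		= foo_freeze,
 *		.thaw		= foo_thaw,
 *		.poweroff	= foo_poweroff,
 *		.restore	= foo_restore,
 *	};
 */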

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}
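
/*
 * Example (illustrative sketch, not part of the original file): the
 * "noirq" stage uses the *_noirq members of the same dev_pm_ops table,
 * so a driver would extend the hypothetical foo_pm_ops above with e.g.:
 *
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 */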

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.in_suspend = false;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
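
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * opts a device into the asynchronous path tested by is_async() by
 * calling device_enable_async_suspend(), typically at probe time once it
 * knows the device has no ordering constraints beyond its parent and
 * children; user space can toggle the same flag via the device's
 * power/async sysfs attribute.
 *
 *	device_enable_async_suspend(dev);
 */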
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices in dpm_prepared_list
 * (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.in_suspend = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);
		pm_runtime_put_sync(dev);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
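
/*
 * Example (illustrative sketch, not part of the original file): the
 * callbacks wrapped by legacy_suspend() use the old bus/class prototype
 * that still carries the pm_message_t; foo_suspend() is hypothetical.
 *
 *	static int foo_suspend(struct device *dev, pm_message_t state)
 *	{
 *		... quiesce the hardware ...
 *		return 0;
 *	}
 */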

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}
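
/*
 * Example (illustrative sketch, not part of the original file):
 * ->prepare() and ->complete() bracket the whole transition, and a
 * subsystem typically uses the pair to fence off the registration of new
 * children.  The foo_* callbacks are hypothetical.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		... block creation of new child devices ...
 *		return 0;
 *	}
 *
 *	static void foo_complete(struct device *dev)
 *	{
 *		... allow creation of new child devices again ...
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.prepare	= foo_prepare,
 *		.complete	= foo_complete,
 *	};
 */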

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		if (pm_wakeup_pending()) {
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.in_suspend = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
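
/*
 * Example (illustrative sketch, assuming the caller shape used by
 * kernel/power/suspend.c): a platform sleep path pairs
 * dpm_suspend_start() with dpm_resume_end(), roughly:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		... enter the sleep state ...
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */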

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
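
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * whose device depends on another, functionally related device can order
 * its own suspend/resume after that device's by calling
 * device_pm_wait_for_dev() from its callbacks.  foo_suspend() and
 * foo->partner are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->partner);
 *		if (error)
 *			return error;
 *		... quiesce the hardware ...
 *		return 0;
 *	}
 */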