main.c 27.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
15 16 17
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
L
Linus Torvalds 已提交
18 19 20
 */

#include <linux/device.h>
21
#include <linux/kallsyms.h>
22
#include <linux/mutex.h>
23
#include <linux/pm.h>
24
#include <linux/pm_runtime.h>
25
#include <linux/resume-trace.h>
26
#include <linux/interrupt.h>
27
#include <linux/sched.h>
28
#include <linux/async.h>
29
#include <linux/suspend.h>
30

31
#include "../base.h"
L
Linus Torvalds 已提交
32 33
#include "power.h"

34
/*
35
 * The entries in the dpm_list list are in a depth first order, simply
36 37 38
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
39 40
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
41 42 43
 * dpm_list_mutex.
 */

44
LIST_HEAD(dpm_list);
L
Linus Torvalds 已提交
45

46
static DEFINE_MUTEX(dpm_list_mtx);
47
static pm_message_t pm_transition;
L
Linus Torvalds 已提交
48

49 50 51 52 53 54
/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

55 56
static int async_error;

57
/**
58
 * device_pm_init - Initialize the PM-related part of a device object.
59 60 61 62 63
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
64
	init_completion(&dev->power.completion);
65
	complete_all(&dev->power.completion);
66 67
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
68 69 70
	pm_runtime_init(dev);
}

71
/**
72
 * device_pm_lock - Lock the list of active devices used by the PM core.
73 74 75 76 77 78 79
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
80
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
81 82 83 84 85
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
86

87
/**
88 89
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
90
 */
91
void device_pm_add(struct device *dev)
L
Linus Torvalds 已提交
92 93
{
	pr_debug("PM: Adding info for %s:%s\n",
94 95
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
96
	mutex_lock(&dpm_list_mtx);
97
	if (dev->parent) {
98 99
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
100
				 dev_name(dev->parent));
101 102 103 104 105 106
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
107
		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
108
	}
109 110

	list_add_tail(&dev->power.entry, &dpm_list);
111
	mutex_unlock(&dpm_list_mtx);
L
Linus Torvalds 已提交
112 113
}

114
/**
115 116
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
117
 */
118
void device_pm_remove(struct device *dev)
L
Linus Torvalds 已提交
119 120
{
	pr_debug("PM: Removing info for %s:%s\n",
121 122
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
123
	complete_all(&dev->power.completion);
124
	mutex_lock(&dpm_list_mtx);
L
Linus Torvalds 已提交
125
	list_del_init(&dev->power.entry);
126
	mutex_unlock(&dpm_list_mtx);
127
	device_wakeup_disable(dev);
128
	pm_runtime_remove(dev);
129 130
}

131
/**
132 133 134
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
135 136 137 138 139 140 141 142 143 144 145 146 147
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
148 149 150
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
151 152 153 154 155 156 157 158 159 160 161 162 163
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
164 165
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
166 167 168 169 170 171 172 173 174
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

201 202 203 204 205 206 207 208 209 210
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

211
	if (async || (pm_async_enabled && dev->power.async_suspend))
212 213 214 215 216 217 218 219 220 221 222 223 224 225
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
       device_for_each_child(dev, &async, dpm_wait_fn);
}

226
/**
227 228 229 230
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
231
 */
232 233 234
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
235 236
{
	int error = 0;
237
	ktime_t calltime;
238

239
	calltime = initcall_debug_start(dev);
240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
287

288
	initcall_debug_report(dev, calltime, error);
289

290 291 292 293
	return error;
}

/**
294 295 296 297
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
298
 *
299 300
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
301
 */
302 303
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
304 305 306
			pm_message_t state)
{
	int error = 0;
307 308 309
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
310 311 312
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
313 314
		calltime = ktime_get();
	}
315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
362 363 364 365

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
366 367 368
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
369 370
	}

371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

412 413 414
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
415
	u64 usecs64;
416 417 418 419 420 421 422 423 424 425 426 427 428
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

429 430 431
/*------------------------- Resume routines -------------------------*/

/**
432 433 434
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
435
 *
436 437
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
438
 */
439
static int device_resume_noirq(struct device *dev, pm_message_t state)
440 441 442 443 444 445
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

446
	if (dev->bus && dev->bus->pm) {
447 448
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
449 450
		if (error)
			goto End;
451
	}
452

453 454 455 456 457 458 459 460 461 462 463 464 465
	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
466 467 468 469 470
	TRACE_RESUME(error);
	return error;
}

/**
471 472
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
473
 *
474 475
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
476
 */
477
void dpm_resume_noirq(pm_message_t state)
478
{
479
	struct list_head list;
480
	ktime_t starttime = ktime_get();
481

482
	INIT_LIST_HEAD(&list);
483
	mutex_lock(&dpm_list_mtx);
484
	transition_started = false;
485 486 487 488
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
489 490
		if (dev->power.status > DPM_OFF) {
			int error;
491

492
			dev->power.status = DPM_OFF;
493 494
			mutex_unlock(&dpm_list_mtx);

495
			error = device_resume_noirq(dev, state);
496 497

			mutex_lock(&dpm_list_mtx);
498 499 500
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
501 502 503 504 505
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
506
	mutex_unlock(&dpm_list_mtx);
507
	dpm_show_time(starttime, state, "early");
508
	resume_device_irqs();
509
}
510
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
511

512 513
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
R
Randy Dunlap 已提交
514 515
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

532
/**
533
 * device_resume - Execute "resume" callbacks for given device.
534 535
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
536
 * @async: If true, the device is being resumed asynchronously.
537
 */
538
static int device_resume(struct device *dev, pm_message_t state, bool async)
539 540 541 542 543
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);
544

545
	dpm_wait(dev->parent, async);
546
	device_lock(dev);
547

548 549
	dev->power.status = DPM_RESUMING;

550 551 552
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
553
			error = pm_op(dev, dev->bus->pm, state);
554 555
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
556
			error = legacy_resume(dev, dev->bus->resume);
557 558 559
		}
		if (error)
			goto End;
560 561
	}

562 563 564 565 566 567 568
	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
569 570
	}

571 572 573 574 575 576
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
577
			error = legacy_resume(dev, dev->class->resume);
578
		}
579
	}
580
 End:
581
	device_unlock(dev);
582
	complete_all(&dev->power.completion);
583

584 585 586 587
	TRACE_RESUME(error);
	return error;
}

588 589 590 591 592
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

593
	error = device_resume(dev, pm_transition, true);
594 595 596 597 598
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

599
static bool is_async(struct device *dev)
600
{
601 602
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
603 604
}

605
/**
606 607
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
608
 *
609 610
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
611 612 613 614
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
615
	struct device *dev;
616
	ktime_t starttime = ktime_get();
617 618 619

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
620
	pm_transition = state;
621
	async_error = 0;
622

623 624 625 626 627 628 629 630 631 632 633 634 635
	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
636
		get_device(dev);
637
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
638 639 640 641
			int error;

			mutex_unlock(&dpm_list_mtx);

642
			error = device_resume(dev, state, false);
643 644 645 646 647 648 649 650 651 652 653 654 655 656

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
657
	async_synchronize_full();
658
	dpm_show_time(starttime, state, NULL);
659 660 661
}

/**
662 663 664
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
665
 */
666
static void device_complete(struct device *dev, pm_message_t state)
667
{
668
	device_lock(dev);
669 670 671 672 673 674 675 676 677 678 679

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

680
	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
681
		pm_dev_dbg(dev, state, "completing ");
682
		dev->bus->pm->complete(dev);
683 684
	}

685
	device_unlock(dev);
686 687 688
}

/**
689 690
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
691
 *
692 693
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
694
 */
695
static void dpm_complete(pm_message_t state)
696
{
697 698 699
	struct list_head list;

	INIT_LIST_HEAD(&list);
700
	mutex_lock(&dpm_list_mtx);
R
Romit Dasgupta 已提交
701
	transition_started = false;
702 703
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);
704

705 706 707 708 709
		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

710
			device_complete(dev, state);
711
			pm_runtime_put_sync(dev);
712 713 714 715 716 717

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
718
	}
719
	list_splice(&list, &dpm_list);
720 721 722 723
	mutex_unlock(&dpm_list_mtx);
}

/**
724 725
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
726
 *
727 728
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
729
 */
730
void dpm_resume_end(pm_message_t state)
731
{
732
	might_sleep();
733 734
	dpm_resume(state);
	dpm_complete(state);
735
}
736
EXPORT_SYMBOL_GPL(dpm_resume_end);
737 738 739 740


/*------------------------- Suspend routines -------------------------*/

741
/**
742 743 744 745 746
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
747 748
 */
static pm_message_t resume_event(pm_message_t sleep_state)
749
{
750 751 752 753 754 755 756 757
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
758
	}
759
	return PMSG_ON;
760 761 762
}

/**
763 764 765
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
766
 *
767 768
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
769
 */
770
static int device_suspend_noirq(struct device *dev, pm_message_t state)
771 772
{
	int error = 0;
773

774 775 776 777 778 779 780 781 782 783 784 785 786 787
	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

788
	if (dev->bus && dev->bus->pm) {
789 790
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
791
	}
792 793

End:
794 795 796 797
	return error;
}

/**
798 799
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
800
 *
801 802
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
803
 */
804
int dpm_suspend_noirq(pm_message_t state)
805
{
806
	struct list_head list;
807
	ktime_t starttime = ktime_get();
808 809
	int error = 0;

810
	INIT_LIST_HEAD(&list);
811
	suspend_device_irqs();
812
	mutex_lock(&dpm_list_mtx);
813 814 815 816 817 818
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

819
		error = device_suspend_noirq(dev, state);
820 821

		mutex_lock(&dpm_list_mtx);
822
		if (error) {
823
			pm_dev_err(dev, state, " late", error);
824
			put_device(dev);
825 826
			break;
		}
827
		dev->power.status = DPM_OFF_IRQ;
828 829 830
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
831
	}
832
	list_splice_tail(&list, &dpm_list);
833
	mutex_unlock(&dpm_list_mtx);
834
	if (error)
835
		dpm_resume_noirq(resume_event(state));
836 837
	else
		dpm_show_time(starttime, state, "late");
838 839
	return error;
}
840
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
841

842 843
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
R
Randy Dunlap 已提交
844 845 846
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

864
/**
865 866 867
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
868
 * @async: If true, the device is being suspended asynchronously.
869
 */
870
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
871 872 873
{
	int error = 0;

874
	dpm_wait_for_children(dev, async);
875
	device_lock(dev);
876

877 878 879
	if (async_error)
		goto End;

880 881 882 883 884 885
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
886
			error = legacy_suspend(dev, state, dev->class->suspend);
887 888 889
		}
		if (error)
			goto End;
890 891
	}

892 893 894 895 896 897 898
	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
899 900
	}

901 902 903
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
904
			error = pm_op(dev, dev->bus->pm, state);
905 906
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
907
			error = legacy_suspend(dev, state, dev->bus->suspend);
908
		}
909
	}
910 911 912 913

	if (!error)
		dev->power.status = DPM_OFF;

914
 End:
915
	device_unlock(dev);
916
	complete_all(&dev->power.completion);
917

918 919 920
	if (error)
		async_error = error;

921 922 923
	return error;
}

924 925 926 927 928 929
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
930
	if (error)
931 932 933 934 935 936 937 938 939
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

940
	if (pm_async_enabled && dev->power.async_suspend) {
941 942 943 944 945 946 947 948
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

949
/**
950 951
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
952
 */
953
static int dpm_suspend(pm_message_t state)
954
{
955
	struct list_head list;
956
	ktime_t starttime = ktime_get();
957 958
	int error = 0;

959
	INIT_LIST_HEAD(&list);
960
	mutex_lock(&dpm_list_mtx);
961 962
	pm_transition = state;
	async_error = 0;
963 964
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);
965

966
		get_device(dev);
967
		mutex_unlock(&dpm_list_mtx);
968

969
		error = device_suspend(dev);
970

971
		mutex_lock(&dpm_list_mtx);
972
		if (error) {
973 974
			pm_dev_err(dev, state, "", error);
			put_device(dev);
975 976
			break;
		}
977
		if (!list_empty(&dev->power.entry))
978 979
			list_move(&dev->power.entry, &list);
		put_device(dev);
980 981
		if (async_error)
			break;
982
	}
983
	list_splice(&list, dpm_list.prev);
984
	mutex_unlock(&dpm_list_mtx);
985 986 987
	async_synchronize_full();
	if (!error)
		error = async_error;
988 989
	if (!error)
		dpm_show_time(starttime, state, NULL);
990 991 992 993
	return error;
}

/**
994 995 996 997 998 999
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
1000
 */
1001
static int device_prepare(struct device *dev, pm_message_t state)
1002 1003 1004
{
	int error = 0;

1005
	device_lock(dev);
1006

1007
	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
1008
		pm_dev_dbg(dev, state, "preparing ");
1009 1010
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
1029
	device_unlock(dev);
1030 1031 1032

	return error;
}
1033

1034
/**
1035 1036
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
1037
 *
1038
 * Execute the ->prepare() callback(s) for all devices.
1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

1055
		pm_runtime_get_noresume(dev);
1056 1057 1058 1059
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		if (!pm_check_wakeup_events()) {
1060
			pm_runtime_put_sync(dev);
1061 1062 1063 1064
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}
1065 1066 1067 1068 1069 1070

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
S
Sebastian Ott 已提交
1071
				error = 0;
1072 1073
				continue;
			}
1074 1075
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
1087 1088 1089
	return error;
}

1090
/**
1091 1092
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
1093
 *
1094 1095
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
1096
 */
1097
int dpm_suspend_start(pm_message_t state)
1098 1099
{
	int error;
1100

1101
	might_sleep();
1102 1103 1104
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
1105 1106
	return error;
}
1107
EXPORT_SYMBOL_GPL(dpm_suspend_start);
1108 1109 1110

void __suspend_report_result(const char *function, void *fn, int ret)
{
1111 1112
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1113 1114
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1115 1116 1117 1118 1119 1120

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
1121
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1122 1123
{
	dpm_wait(dev, subordinate->power.async_suspend);
1124
	return async_error;
1125 1126
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);