/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
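
/*
 * During a system sleep transition each device's power.entry migrates
 * through the lists above:
 *
 *	dpm_prepare()        dpm_list            -> dpm_prepared_list
 *	dpm_suspend()        dpm_prepared_list   -> dpm_suspended_list
 *	dpm_suspend_late()   dpm_suspended_list  -> dpm_late_early_list
 *	dpm_suspend_noirq()  dpm_late_early_list -> dpm_noirq_list
 *
 * The resume phases walk the same path in reverse, and dpm_complete()
 * finally splices the devices back onto dpm_list.
 */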

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}
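
/*
 * Both helpers above are gated by pm_print_times_enabled; the flag is
 * presumably toggled from userspace through the /sys/power/pm_print_times
 * knob (an assumption about the sysfs interface, which lives outside
 * this file).
 */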

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
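
/*
 * The two waiters above provide the ordering for async PM: resume waits
 * on the parent's power.completion (see device_resume()), while suspend
 * waits on all children (see __device_suspend()), so parents resume
 * before their children and suspend after them.
 */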

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
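
/*
 * Illustrative sketch, not part of the original file: pm_op() selects one
 * of the callbacks a driver publishes in its struct dev_pm_ops.  A driver
 * with hypothetical foo_suspend()/foo_resume() handlers would typically
 * provide them as:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * and point the .pm field of its struct device_driver at &foo_pm_ops.
 */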

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback runs.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
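
/*
 * The late/early callbacks returned above run with runtime PM disabled:
 * device_suspend_late() calls __pm_runtime_disable() before invoking
 * them and device_resume_early() re-enables runtime PM afterwards.
 */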

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
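
/*
 * A device participates in async suspend/resume only if it has been
 * opted in with device_enable_async_suspend(), which sets
 * power.async_suspend, and if the user has not cleared pm_async_enabled
 * (exposed via /sys/power/pm_async).
 */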

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/
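
/*
 * For a full system transition the suspend-side entry points below run in
 * the order dpm_prepare(), dpm_suspend() (together: dpm_suspend_start()),
 * then dpm_suspend_late(), dpm_suspend_noirq() (together:
 * dpm_suspend_end()); the resume routines above undo them in reverse.
 */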

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
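
/*
 * Illustrative sketch, not part of the original file: a caller such as the
 * system suspend core is expected to pair the two halves, e.g.:
 *
 *	error = dpm_suspend_end(PMSG_SUSPEND);
 *	if (!error) {
 *		... enter the sleep state ...
 *		dpm_resume_start(PMSG_RESUME);
 *	}
 */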

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of the callback, used in debug output.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
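
/*
 * Illustrative sketch, not part of the original file: a driver whose
 * device must not resume before some other device has finished resuming
 * could order the two from its resume callback, using a hypothetical
 * struct foo that holds the pointer:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->master);
 *		if (error)
 *			return error;
 *		return foo_hw_reinit(foo);
 *	}
 */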

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);