/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
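
/*
 * Overview (an illustrative summary of this file, not authoritative
 * documentation): during a full system sleep transition, the entry points
 * below are invoked by the system sleep core roughly in this order:
 *
 *	dpm_prepare()        ->prepare() for every device on dpm_list
 *	dpm_suspend()        ->suspend() callbacks
 *	dpm_suspend_noirq()  ->suspend_noirq() callbacks, interrupts disabled
 *	   ...the system sleeps and wakes back up...
 *	dpm_resume_noirq()   ->resume_noirq() callbacks
 *	dpm_resume()         ->resume() callbacks
 *	dpm_complete()       ->complete() for everything on dpm_prepared_list
 */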

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
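
/*
 * For illustration only (hypothetical foo_* names, not code from this file):
 * given a driver that publishes
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume.
 */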

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "EARLY power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "EARLY type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "EARLY class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "EARLY bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "EARLY driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
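
/*
 * Illustrative note (assumption, not part of the original file): a device
 * opts in to asynchronous handling by having power.async_suspend set,
 * typically via device_enable_async_suspend() before device_add(); the
 * global pm_async_enabled switch is exposed as /sys/power/pm_async.
 */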

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices in dpm_prepared_list
 * (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "LATE power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "LATE type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "LATE class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "LATE bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "LATE driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
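
/*
 * Sketch of how the system sleep core is expected to pair these entry
 * points (an assumption based on their contracts, not code from this file):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_noirq(PMSG_SUSPEND);
 *		if (!error) {
 *			... put the hardware to sleep ...
 *			dpm_resume_noirq(PMSG_RESUME);
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */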

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
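
/*
 * Usage sketch with hypothetical names (foo, foo_suspend and foo->companion
 * are illustrative, not real API): a driver whose suspend or resume must wait
 * for another, non-ancestral device can call device_pm_wait_for_dev() from
 * its own callback.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return 0;
 *	}
 */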