/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);
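
/*
 * During a system sleep transition devices migrate between these lists:
 * dpm_prepare() moves them from dpm_list to dpm_prepared_list, dpm_suspend()
 * moves them on to dpm_suspended_list, and dpm_suspend_noirq() to
 * dpm_noirq_list; the resume path walks them back in the opposite direction.
 */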

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}
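
/*
 * Note: the completion above is initialized and then immediately completed,
 * so that dpm_wait() on a device that has never been suspended returns
 * without blocking.
 */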

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
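
/*
 * Note: these messages are only emitted when the kernel is booted with the
 * "initcall_debug" command line parameter; the ">> 10" above is a cheap
 * approximation of a nanoseconds-to-microseconds conversion.
 */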

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static int dpm_run_callback(struct device *dev, int (*cb)(struct device *))
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		error = dpm_run_callback(dev, ops->suspend);
		break;
	case PM_EVENT_RESUME:
		error = dpm_run_callback(dev, ops->resume);
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		error = dpm_run_callback(dev, ops->freeze);
		break;
	case PM_EVENT_HIBERNATE:
		error = dpm_run_callback(dev, ops->poweroff);
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		error = dpm_run_callback(dev, ops->thaw);
		break;
	case PM_EVENT_RESTORE:
		error = dpm_run_callback(dev, ops->restore);
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	return error;
}
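
/*
 * Illustrative sketch (not part of this file): a driver supplies the
 * callbacks that pm_op() dispatches to by filling in a dev_pm_ops
 * structure; foo_suspend() and foo_resume() are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 */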

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		error = dpm_run_callback(dev, ops->suspend_noirq);
		break;
	case PM_EVENT_RESUME:
		error = dpm_run_callback(dev, ops->resume_noirq);
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		error = dpm_run_callback(dev, ops->freeze_noirq);
		break;
	case PM_EVENT_HIBERNATE:
		error = dpm_run_callback(dev, ops->poweroff_noirq);
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		error = dpm_run_callback(dev, ops->thaw_noirq);
		break;
	case PM_EVENT_RESTORE:
		error = dpm_run_callback(dev, ops->restore_noirq);
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}
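
/*
 * Note: as in the other dispatch helpers in this file, the callback source
 * is chosen in a fixed order of precedence: PM domain first, then device
 * type, then class, then bus.
 */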

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dpm_run_callback(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dpm_run_callback(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
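
/*
 * Note: pm_async_enabled is controlled from user space via /sys/power/pm_async,
 * and a driver opts a device in with device_enable_async_suspend(), which sets
 * power.async_suspend, typically before the device is registered.
 */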

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
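
/*
 * Note: "legacy" refers to the pre-dev_pm_ops callbacks still present in some
 * buses and classes, i.e. handlers with the signature
 * int (*suspend)(struct device *dev, pm_message_t state), as opposed to the
 * per-event callbacks of struct dev_pm_ops.
 */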

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}
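
/*
 * Note: dpm_suspend() walks dpm_prepared_list from the tail (.prev), so
 * children, which are always registered after their parents, are suspended
 * first; dpm_resume() walks dpm_suspended_list from the head in the
 * opposite order.
 */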

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
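
/*
 * Note: a ->prepare() callback returning -EAGAIN is not treated as a failure
 * above; the error is cleared and the loop carries on instead of aborting
 * the whole transition.
 */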

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
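
/*
 * Illustrative sketch of how the system suspend core is expected to drive
 * these entry points (not part of this file):
 *
 *	dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	dpm_suspend_noirq(PMSG_SUSPEND);	// late suspend, irqs off
 *	// ...the platform enters the sleep state and wakes up...
 *	dpm_resume_noirq(PMSG_RESUME);		// early resume, irqs back on
 *	dpm_resume_end(PMSG_RESUME);		// resume + complete
 */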

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);