/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
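
/*
 * Illustrative sketch (not part of this file): a driver using autosuspend
 * typically refreshes power.last_busy, which the expiration computed above
 * is based on, whenever it finishes a piece of I/O.  The foo_xfer_done()
 * name is hypothetical.
 *
 *	static void foo_xfer_done(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */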

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
		callback = dev->bus->pm->runtime_idle;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
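
/*
 * Illustrative sketch (not part of this file): the ->runtime_idle() callback
 * invoked above usually just checks device-specific conditions and, if the
 * device really is idle, asks for a suspend.  The foo_runtime_idle() name is
 * hypothetical.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		pm_runtime_autosuspend(dev);
 *		return 0;
 *	}
 */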

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval;
}

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
		callback = dev->bus->pm->runtime_suspend;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = 0;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
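
/*
 * Illustrative sketch (not part of this file): a ->runtime_suspend() callback
 * run by rpm_callback() above may return -EBUSY or -EAGAIN to veto the
 * suspend without flagging a permanent error (see the runtime_error handling
 * above).  The foo_* names are hypothetical.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		if (foo_chip_busy(chip))
 *			return -EBUSY;
 *		foo_chip_power_down(chip);
 *		return 0;
 *	}
 */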

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
		callback = dev->bus->pm->runtime_resume;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
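
/*
 * Illustrative sketch (not part of this file): the usual way a driver reaches
 * rpm_resume() is through pm_runtime_get_sync() before touching the hardware,
 * balanced by a put when the I/O is done.  The foo_* names are hypothetical.
 *
 *	static int foo_start_io(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		foo_program_hardware(dev);
 *		pm_runtime_put(dev);
 *		return 0;
 *	}
 */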

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
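
/*
 * Illustrative sketch (not part of this file): a driver that expects more
 * activity shortly may prefer a delayed suspend request over an immediate
 * one; the 2000 ms value below is just an example.
 *
 *	pm_schedule_suspend(dev, 2000);
 */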

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
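
/*
 * Note: drivers normally reach this entry point through the static inline
 * wrappers in include/linux/pm_runtime.h, roughly:
 *
 *	pm_runtime_idle(dev)	->  __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)	->  __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)	->  __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 */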

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
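
/*
 * Note: as with the idle entry point, the usual callers are the wrappers in
 * include/linux/pm_runtime.h, roughly:
 *
 *	pm_runtime_suspend(dev)		->  __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev)	->  __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_put_autosuspend(dev)	->  __pm_runtime_suspend(dev,
 *					        RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */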

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
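
/*
 * Note: the common wrappers for this entry point are, roughly:
 *
 *	pm_runtime_resume(dev)		->  __pm_runtime_resume(dev, 0)
 *	pm_request_resume(dev)		->  __pm_runtime_resume(dev, RPM_ASYNC)
 *	pm_runtime_get_sync(dev)	->  __pm_runtime_resume(dev, RPM_GET_PUT)
 */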

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
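
/*
 * Illustrative sketch (not part of this file): a probe() routine for a device
 * that firmware has left powered up would typically declare the initial state
 * before enabling run-time PM, e.g.:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * where pm_runtime_set_active() is a wrapper around __pm_runtime_set_status().
 */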

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
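
/*
 * Illustrative sketch (not part of this file): calls to pm_runtime_enable()
 * and pm_runtime_disable() must balance; a driver that enables run-time PM
 * in probe() typically disables it again in remove():
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);
 *		return 0;
 *	}
 */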

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
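
/*
 * Note: these two helpers back the power/control sysfs attribute (writing
 * "on" calls pm_runtime_forbid(), writing "auto" calls pm_runtime_allow()).
 * Illustrative sketch: a driver that wants run-time PM to be opt-in from
 * user space can default to the forbidden state in probe():
 *
 *	pm_runtime_forbid(dev);
 */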

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
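
/*
 * Illustrative sketch (not part of this file): a driver whose
 * ->runtime_suspend() and ->runtime_resume() callbacks never sleep can make
 * this declaration once in probe() and may then invoke the synchronous
 * helpers, such as pm_runtime_get_sync(), from atomic context:
 *
 *	pm_runtime_irq_safe(dev);
 */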

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
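
/*
 * Illustrative sketch (not part of this file): autosuspend is normally set up
 * once in probe(); the 1000 ms delay below is just an example value.
 *
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_autosuspend_delay(dev, 1000);
 *	pm_runtime_enable(dev);
 */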

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}