/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

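/*
 * Invoke a device PM callback of the given type, preferring the domain-wide
 * dev_ops and falling back to the per-device ops attached with
 * pm_genpd_add_callbacks().  Evaluates to (type)0 if neither is set.
 */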
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine) 					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

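/*
 * Like GENPD_DEV_CALLBACK(), but also measure the callback's execution time.
 * If the callback succeeds and its duration exceeds the worst case seen so
 * far, record the new value and flag the timing data as changed so that the
 * domain governor re-evaluates its constraints.
 */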
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpu_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpu_data->saved_exit_latency;
	genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpu_data) {
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
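
/*
 * Example (hypothetical, not part of this file): platform setup code could
 * power up a domain by name before touching devices inside it.  The domain
 * name "A4MP" below is made up for illustration.
 *
 *	static int __init board_domains_init(void)
 *	{
 *		int ret = pm_genpd_name_poweron("A4MP");
 *
 *		if (ret)
 *			pr_err("Failed to power on A4MP: %d\n", ret);
 *
 *		return ret;
 *	}
 */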

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	bool need_restore = gpd_data->need_restore;

	gpd_data->need_restore = false;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpu_data) {
		/*
		 * If cpu_data is set, cpuidle should turn the domain off when
		 * the CPU in it is idle.  In that case we don't decrement the
		 * subdomain counts of the master domains, so that power is not
		 * removed from the current domain prematurely as a result of
		 * cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
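
/*
 * Example (hypothetical): platforms typically call this once, late in boot,
 * after drivers have had a chance to claim their devices, so that domains
 * nobody uses get switched off:
 *
 *	static int __init board_poweroff_unused(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(board_poweroff_unused);
 */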

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
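
/*
 * Example (hypothetical): a timekeeping device that must keep running across
 * the "noirq" phases could bracket its syscore callbacks like this (all of
 * the foo_* names are made up for illustration):
 *
 *	static void foo_clocksource_suspend(struct foo_priv *foo)
 *	{
 *		foo_hw_stop(foo);
 *		pm_genpd_syscore_switch(foo->dev, true);
 *	}
 *
 *	static void foo_clocksource_resume(struct foo_priv *foo)
 *	{
 *		pm_genpd_syscore_switch(foo->dev, false);
 *		foo_hw_start(foo);
 *	}
 */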

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
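
/*
 * Example (hypothetical): platform code usually adds devices with default
 * timing data (td == NULL); the stop/start latencies are then measured at
 * run time by the GENPD_DEV_TIMED_CALLBACK() instrumentation above.
 * "my_domain" is a made-up generic_pm_domain:
 *
 *	ret = __pm_genpd_add_device(&my_domain, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add to PM domain: %d\n", ret);
 */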

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing a PM domain to which
 *   the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}


/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
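
/*
 * Example (hypothetical): a driver that has just reset its hardware can ask
 * for a full state restore on the next runtime resume (the foo_* names are
 * made up for illustration):
 *
 *	foo_hw_reset(foo);
 *	pm_genpd_dev_need_restore(foo->dev, true);
 */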

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}
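
/*
 * Example (hypothetical): wiring up a two-level hierarchy by the names the
 * platform assigned to its domains ("A4S" and "A3SP" are made up):
 *
 *	ret = pm_genpd_add_subdomain_names("A4S", "A3SP");
 *	if (ret)
 *		pr_err("Failed to add A3SP to A4S: %d\n", ret);
 */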

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 *
 * Every call to this routine should be balanced with a call to
 * __pm_genpd_remove_callbacks() and they must not be nested.
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	int ret = 0;

	if (!(dev && ops))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	pm_runtime_disable(dev);
	device_pm_lock();

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	gpd_data->ops = *ops;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

 out:
	device_pm_unlock();
	pm_runtime_enable(dev);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
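
/*
 * Example (hypothetical): attaching device-specific save/restore routines,
 * to be balanced with __pm_genpd_remove_callbacks() on teardown (the foo_*
 * names are made up for illustration):
 *
 *	static struct gpd_dev_ops foo_gpd_ops = {
 *		.save_state = foo_save_state,
 *		.restore_state = foo_restore_state,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(&pdev->dev, &foo_gpd_ops, NULL);
 */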

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 *
 * This routine can only be called after pm_genpd_add_callbacks().
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	struct generic_pm_domain_data *gpd_data = NULL;
	bool remove = false;
	int ret = 0;

	if (!(dev && dev->power.subsys_data))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
		gpd_data->ops = (struct gpd_dev_ops){ NULL };
		if (clear_td)
			gpd_data->td = (struct gpd_timing_data){ 0 };

		if (--gpd_data->refcount == 0) {
			dev->power.subsys_data->domain_data = NULL;
			remove = true;
		}
	} else {
		ret = -EINVAL;
	}

	spin_unlock_irq(&dev->power.lock);

	device_pm_unlock();
	pm_runtime_enable(dev);

	if (ret)
		return ret;

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpu_data) {
		ret = -EEXIST;
		goto out;
	}
	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto out;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpu_data->idle_state = idle_state;
	cpu_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpu_data = cpu_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();
	goto out;
}
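
/*
 * Example (hypothetical): letting cpuidle manage a CPU domain through one of
 * its idle states ("cpu_domain" and the state index 1 are made up):
 *
 *	ret = pm_genpd_attach_cpuidle(&cpu_domain, 1);
 *	if (ret)
 *		pr_warn("Failed to attach cpuidle state: %d\n", ret);
 *
 * Note that the target cpuidle state must have been registered as disabled,
 * or this function returns -EAGAIN (see the check above).
 */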

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpu_data = genpd->cpu_data;
	if (!cpu_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpu_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpu_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpu_data = NULL;
	kfree(cpu_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}
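
/*
 * Illustrative sketch (not used by this file): undoing the connection made
 * by pm_genpd_attach_cpuidle().  -EAGAIN means the idle state is currently
 * enabled, so the domain cannot take back its power-off role yet.
 */
static int __maybe_unused example_stop_using_cpuidle(
					struct generic_pm_domain *genpd)
{
	return pm_genpd_detach_cpuidle(genpd);
}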

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.save_state;
	if (cb)
		return cb(dev);

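	/*
	 * No PM domain callback was set for this device, so fall back to
	 * the subsystem-level runtime suspend callback: device type first,
	 * then class, then bus, and finally the driver itself below.
	 */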
	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.restore_state;
	if (cb)
		return cb(dev);

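	/*
	 * No PM domain callback was set for this device, so fall back to
	 * the subsystem-level runtime resume callback: device type first,
	 * then class, then bus, and finally the driver itself below.
	 */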
	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

	return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

	return cb ? cb(dev) : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;

	return cb ? cb(dev) : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_default_resume - Default "device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;

	return cb ? cb(dev) : pm_generic_resume(dev);
}

/**
 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;

	return cb ? cb(dev) : pm_generic_freeze(dev);
}

/**
 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;

	return cb ? cb(dev) : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;

	return cb ? cb(dev) : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;

	return cb ? cb(dev) : pm_generic_thaw(dev);
}

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_default_suspend	NULL
#define pm_genpd_default_suspend_late	NULL
#define pm_genpd_default_resume_early	NULL
#define pm_genpd_default_resume		NULL
#define pm_genpd_default_freeze		NULL
#define pm_genpd_default_freeze_late	NULL
#define pm_genpd_default_thaw_early	NULL
#define pm_genpd_default_thaw		NULL

#endif /* !CONFIG_PM_SLEEP */

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain: powered off if true, active otherwise.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
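
/*
 * Illustrative sketch (not used by this file): how platform code might
 * define and register a domain with pm_genpd_init().  The domain name and
 * the power_on/power_off handlers are assumptions made up for illustration.
 */
static int __maybe_unused example_power_off(struct generic_pm_domain *domain)
{
	/* Platform-specific: cut power to the hardware island here. */
	return 0;
}

static int __maybe_unused example_power_on(struct generic_pm_domain *domain)
{
	/* Platform-specific: restore power to the hardware island here. */
	return 0;
}

static struct generic_pm_domain example_domain = {
	.name = "example",
	.power_off = example_power_off,
	.power_on = example_power_on,
};

static void __maybe_unused example_domain_setup(void)
{
	/*
	 * A NULL governor means no governor is consulted; true means the
	 * domain starts out powered off.  Devices and subdomains would be
	 * added afterwards with pm_genpd_add_device() and
	 * pm_genpd_add_subdomain().
	 */
	pm_genpd_init(&example_domain, NULL, true);
}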