/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})
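
/*
 * Illustrative note (not part of the original file): for a domain that was
 * initialized with GENPD_FLAG_PM_CLK, dev_ops.stop is pm_clk_suspend, so a
 * call such as
 *
 *	ret = GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 *
 * resolves genpd->dev_ops.stop at run time and invokes it only if the domain
 * provides that callback, yielding (type)0 otherwise.  The timed variant
 * additionally measures how long the callback took and, on success, records
 * a new worst-case latency in the device's gpd_timing_data.
 */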

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
			bool timed)
{
	if (!timed)
		return GENPD_DEV_CALLBACK(genpd, int, start, dev);

	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpuidle_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpuidle_data->saved_exit_latency;
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	genpd_recalc_cpu_exit_latency(genpd);
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->cpuidle_data) {
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		ret = pm_genpd_poweron(link->master);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

 out:
	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
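
/*
 * Minimal usage sketch (illustrative; "my_pd" is a hypothetical domain
 * pointer): a platform could power up a domain, and all of its masters,
 * before accessing hardware inside it:
 *
 *	ret = pm_genpd_poweron(my_pd);
 *	if (ret)
 *		return ret;
 */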

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			struct device *dev, bool timed)
{
	if (!timed)
		return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);

	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->cpuidle_data) {
		/*
		 * If cpuidle_data is set, cpuidle should turn the domain off
		 * when the CPU in it is idle.  In that case we don't decrement
		 * the subdomain counts of the master domains, so that power is
		 * not removed from the current domain prematurely as a result
		 * of cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		return 0;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	pm_genpd_poweroff(genpd);
	mutex_unlock(&genpd->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_save_dev(genpd, dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		genpd_restore_dev(genpd, dev, true);
		return ret;
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	genpd_start_dev(genpd, dev, timed);
	genpd_restore_dev(genpd, dev, timed);

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
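
/*
 * Usage note: booting with "pd_ignore_unused" on the kernel command line
 * keeps otherwise-unused domains powered on, which can help when debugging
 * a suspected power-domain-related hang.
 */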

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev, true);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might have put the domain into an arbitrary
		 * state, so make it appear as powered off to
		 * pm_genpd_sync_poweron(), which will then power it on in case
		 * it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev, true);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
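
/*
 * Usage note (illustrative): these helpers are intended for "always on"
 * devices that must keep running into the syscore stage, e.g. a timer
 * driver could call pm_genpd_syscore_poweroff(dev) from its very late
 * suspend path and pm_genpd_syscore_poweron(dev) from the matching resume
 * path, so that the domain state follows the device across that stage.
 */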

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;
	dev->pm_domain = &genpd->domain;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
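
/*
 * Usage sketch (illustrative; "foo_pd" is hypothetical): a SoC setup path
 * could bind a device to a domain, passing NULL timing data to start from
 * the defaults:
 *
 *	ret = __pm_genpd_add_device(&foo_pd, dev, NULL);
 *	if (ret)
 *		dev_err(dev, "failed to add to PM domain: %d\n", ret);
 */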

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	mutex_lock(&genpd->lock);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	mutex_unlock(&genpd->lock);

	return ret;
}
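
/*
 * Usage sketch (illustrative; the names are hypothetical): with two
 * initialized domains, making "foo_sub" a subdomain of "foo_pd" guarantees
 * that the master is kept powered for as long as the subdomain is on:
 *
 *	ret = pm_genpd_add_subdomain(&foo_pd, &foo_sub);
 */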

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	mutex_unlock(&genpd->lock);

	return ret;
}

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (genpd->cpuidle_data) {
		ret = -EEXIST;
		goto out;
	}
	cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
	if (!cpuidle_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpuidle_data->idle_state = idle_state;
	cpuidle_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpuidle_data = cpuidle_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	mutex_unlock(&genpd->lock);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpuidle_data);
	goto out;
}
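
/*
 * Usage sketch (illustrative; "foo_pd" is hypothetical): a platform that
 * wants cpuidle state 1 to stand in for powering the domain off could call
 *
 *	ret = pm_genpd_attach_cpuidle(&foo_pd, 1);
 *
 * The target state must currently be disabled, otherwise -EAGAIN is
 * returned (see above).
 */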

/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	cpuidle_data = genpd->cpuidle_data;
	if (!cpuidle_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpuidle_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpuidle_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpuidle_data = NULL;
	kfree(cpuidle_data);

 out:
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
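
/*
 * Minimal registration sketch (illustrative; the "foo" callbacks and names
 * are hypothetical, not from this file):
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return foo_hw_island_off();
 *	}
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return foo_hw_island_on();
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo",
 *		.power_off	= foo_pd_power_off,
 *		.power_on	= foo_pd_power_on,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 *
 * Passing true for @is_off tells genpd that the island starts powered down,
 * so the first device resume will invoke .power_on() before starting devices.
 */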

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
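
/*
 * Provider sketch (illustrative; the "foo" names are hypothetical): a driver
 * exposing several domains from one device tree node could register them as
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = FOO_NR_DOMAINS,
 *	};
 *
 *	__of_genpd_add_provider(np, __of_genpd_xlate_onecell,
 *				&foo_onecell_data);
 *
 * after which "power-domains = <&foo_node 2>;" in a consumer node maps to
 * foo_domains[2] via the xlate callback above.
 */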

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if one is found, uses the provider's xlate function to map the phandle
 * arguments to a PM domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our list */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
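
/*
 * Callers outside the DT attach path below typically get the of_phandle_args
 * from of_parse_phandle_with_args(), but the specifier can also be built by
 * hand (a sketch; "provider_np" is assumed to be a valid provider node):
 *
 *	struct of_phandle_args pd_args = {
 *		.np = provider_np,
 *		.args_count = 1,
 *		.args = { 0 },
 *	};
 *	struct generic_pm_domain *pd;
 *
 *	pd = of_genpd_get_from_provider(&pd_args);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */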

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate the generic PM domain that the device was previously
 * attached to. If one is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

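	/*
	 * pm_genpd_remove_device() returns -EAGAIN while the domain is busy
	 * (e.g. a system suspend transition is in progress), so retry with
	 * exponential back-off, bounding the total delay to roughly
	 * GENPD_RETRY_MAX_MS.
	 */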
	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If one is found,
 * attach the device to the retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 when the device has been successfully attached to its PM domain,
 * or a negative error code otherwise. Note that if a power-domain exists for
 * the device, but it cannot be found or turned on, -EPROBE_DEFER is returned
 * so that the device is not probed and the attach is retried later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	/* Drop the reference taken on pd_args.np by the phandle parsing. */
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

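	/* As in genpd_dev_pm_detach(): back off and retry while busy. */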
	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d",
			pd->name, ret);
		of_node_put(dev->of_node);
2013
		goto out;
2014 2015 2016
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;
	ret = pm_genpd_poweron(pd);

out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
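
/*
 * A bus's probe path can hook a device up to its PM domain with
 * genpd_dev_pm_attach() before running the driver's probe (a sketch; only
 * probe deferral needs special handling, other errors usually just mean
 * there is no domain to attach):
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;
 *	... continue probing, with or without a PM domain ...
 */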
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}
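
/*
 * The summary produced above looks roughly like this (illustrative output;
 * domain and device names depend on the platform):
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	power-domain-a                  on              power-domain-b
 *	    /devices/platform/foo.0                             suspended
 */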

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */