/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})
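/*
 * For example, GENPD_DEV_CALLBACK(genpd, int, stop, dev) evaluates, in
 * effect, to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = __routine ? __routine(dev) : 0;
 *
 * i.e. the per-device callback is invoked only if the domain provides
 * one, and 0 is returned otherwise.
 */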

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

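/*
 * These helpers dispatch through genpd->lock_ops, so callers need not care
 * whether a given domain uses a mutex (the default) or an IRQ-safe spinlock
 * (GENPD_FLAG_IRQ_SAFE); see genpd_lock_init().
 */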
#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
	 * this indicates a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, thus don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance-state constraints left (and so
 *	   the device no longer takes part in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd won't be detached
 * while this routine is running.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data, *pd_data;
	struct pm_domain_data *pdd;
	unsigned int prev;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (unlikely(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data)) {
		WARN_ON(1);
		return -EINVAL;
	}

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	/* New requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		goto unlock;

	/* New requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		goto update_state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	if (state == genpd->performance_state)
		goto unlock;

	/*
	 * We aren't propagating performance state changes of a subdomain to its
	 * masters as we don't have hardware that needs it. Besides that, the
	 * performance states of a subdomain and its masters may not have a
	 * one-to-one mapping and would require additional information. We can
	 * get back to this once we have hardware that needs it. For that
	 * reason, we don't have to consider the performance state of the
	 * subdomains of genpd here.
	 */

update_state:
	if (genpd_status_on(genpd)) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			gpd_data->performance_state = prev;
			goto unlock;
		}
	}

	genpd->performance_state = state;

unlock:
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
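/*
 * Usage sketch (illustrative; "consumer_dev" stands for a device already
 * attached to a genpd that implements ->set_performance_state):
 *
 *	ret = dev_pm_genpd_set_performance_state(consumer_dev, 3);
 *	...
 *	dev_pm_genpd_set_performance_state(consumer_dev, 0);
 *
 * Each device votes for a performance state and the domain is programmed
 * to the highest of the votes, as aggregated above.
 */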

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));

	if (unlikely(genpd->set_performance_state)) {
		ret = genpd->set_performance_state(genpd, genpd->performance_state);
		if (ret) {
			pr_warn("%s: Failed to set performance state %d (%d)\n",
				genpd->name, genpd->performance_state, ret);
		}
	}

	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ safe domain that holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
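/*
 * Booting with "pd_ignore_unused" on the kernel command line keeps
 * otherwise-unused PM domains powered on, which can help when debugging a
 * platform whose consumer drivers are not yet in place.
 */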

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

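/**
 * pm_genpd_syscore_poweroff - Power off the PM domain containing @dev.
 * @dev: Device that is part of the PM domain to be powered off.
 *
 * A wrapper around genpd_syscore_switch() for the syscore suspend path.
 */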
void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

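/**
 * pm_genpd_syscore_poweron - Power on the PM domain containing @dev.
 * @dev: Device that is part of the PM domain to be powered on.
 *
 * A wrapper around genpd_syscore_switch() for the syscore resume path.
 */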
void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, td);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	if (!genpd || genpd != genpd_lookup_dev(dev))
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
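/*
 * Typical usage (a minimal sketch; "my_pd", my_power_on() and
 * my_power_off() are made-up names):
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_power_on,
 *		.power_off = my_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *
 * Passing a NULL governor skips governor-based power-down decisions, and
 * is_off == true registers the domain as initially powered off.
 */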

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
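/*
 * For reference, a onecell provider typically corresponds to a device tree
 * layout along these lines (names are illustrative):
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		#power-domain-cells = <1>;
 *	};
 *
 *	consumer@12350000 {
 *		power-domains = <&power 0>;
 *	};
 *
 * where the single specifier cell is the index decoded above.
 */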

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state)
			dev_pm_opp_of_remove_table(&genpd->dev);

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
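
/*
 * Usage sketch (hypothetical driver code, not part of this file): a platform
 * driver initializing a single domain and registering it as a simple
 * provider. "foo_pd" and its power_on/power_off callbacks are assumptions
 * for illustration.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_init(&foo_pd, NULL, true);
 *		if (ret)
 *			return ret;
 *
 *		ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *		if (ret)
 *			pm_genpd_remove(&foo_pd);
 *
 *		return ret;
 *	}
 */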

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state)
			dev_pm_opp_of_remove_table(&genpd->dev);
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
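
/*
 * Usage sketch (hypothetical driver code): registering several domains behind
 * a single node. "foo_domains" and FOO_NUM_DOMAINS are assumptions for
 * illustration; NULL entries are skipped above, and leaving data->xlate NULL
 * selects the default genpd_xlate_onecell().
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NUM_DOMAINS];
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	...call pm_genpd_init() on each non-NULL foo_domains[i], then:...
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 */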

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look up a PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if found, uses the provider's xlate function to map the phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);
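
/*
 * Usage sketch (assumptions marked): resolving a specifier from a
 * hypothetical "foo,power-domains" property and attaching a device to the
 * resulting domain.
 *
 *	struct of_phandle_args pd_args;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(np, "foo,power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = of_genpd_add_device(&pd_args, dev);
 *	of_node_put(pd_args.np);
 */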

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
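
/*
 * Usage sketch (hypothetical provider code): making one domain a subdomain of
 * another, with both specifiers resolved beforehand, for example via
 * of_parse_phandle_with_args() on properties of the caller's choice.
 *
 *	struct of_phandle_args parent_spec, child_spec;
 *
 *	...fill parent_spec and child_spec...
 *	ret = of_genpd_add_subdomain(&parent_spec, &child_spec);
 */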

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to the device node associated with the provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM domain will only
 * be removed if the provider associated with the domain has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
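
/*
 * Usage sketch (hypothetical remove path): after deleting the provider, tear
 * down all domains that were registered for its node, newest first. The
 * kfree() assumes the domains were allocated dynamically.
 *
 *	struct generic_pm_domain *gpd;
 *
 *	of_genpd_del_provider(np);
 *	while (!IS_ERR(gpd = of_genpd_remove_last(np)))
 *		kfree(gpd);
 */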

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parses the device's OF node to find a PM domain specifier. If such is
 * found, attaches the device to the retrieved pm_domain ops.
 *
 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
 * need a PM domain, or a negative error code in case of failure. Note that
 * if a power-domain exists for the device but cannot be found or turned on,
 * -EPROBE_DEFER is returned to ensure that the device is not probed and the
 * attach is retried later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	if (!dev->of_node)
		return 0;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0)
		return 0;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d\n",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	genpd_lock(pd);
	ret = genpd_power_on(pd, 0);
	genpd_unlock(pd);

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
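
/*
 * Bus code normally reaches genpd_dev_pm_attach() through
 * dev_pm_domain_attach(). A reduced sketch of a hypothetical bus probe path,
 * where a negative return (possibly -EPROBE_DEFER) aborts the probe:
 *
 *	ret = dev_pm_domain_attach(dev, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = drv->probe(dev);
 *	if (ret)
 *		dev_pm_domain_detach(dev, true);
 */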

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
				    struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
						&entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
						state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
						&exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
						state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}
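
/*
 * A matching idle-state node could look like this in a hypothetical device
 * tree; entry-latency-us and exit-latency-us feed the power_off/power_on
 * latencies above, converted to nanoseconds:
 *
 *	DOMAIN_RET: state@0 {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <1000>;
 *		exit-latency-us = <2000>;
 *		min-residency-us = <10000>;
 *	};
 */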

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states() - Return an array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the responsibility of the caller to
 * free the memory after use. If no domain idle states are found, it returns
 * -EINVAL; in case of other errors, a negative error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret <= 0)
		return ret < 0 ? ret : -EINVAL;

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
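
/*
 * Usage sketch (hypothetical provider init): parse the states referenced by
 * the node's "domain-idle-states" property and hand them to a genpd before
 * pm_genpd_init(). "foo_pd" is an assumption for illustration.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */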

/**
 * of_genpd_opp_to_performance_state() - Get the performance state of a device's
 * power domain corresponding to a DT node's "required-opps" property.
 *
 * @dev: Device for which the performance-state needs to be found.
 * @opp_node: DT node where the "required-opps" property is present. This can be
 *	the device node itself (if it doesn't have an OPP table) or a node
 *	within the OPP table of a device (if device has an OPP table).
 * @state: Pointer to return performance state.
 *
 * Returns performance state corresponding to the "required-opps" property of
 * a DT node. This calls the platform-specific genpd->opp_to_performance_state()
 * callback to translate power domain OPP to performance state.
 *
 * Returns performance state on success and 0 on failure.
 */
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
					       struct device_node *opp_node)
{
	struct generic_pm_domain *genpd;
	struct dev_pm_opp *opp;
	int state = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return 0;

	if (unlikely(!genpd->set_performance_state))
		return 0;

	genpd_lock(genpd);

	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find required OPP: %ld\n",
			PTR_ERR(opp));
		goto unlock;
	}

	state = genpd->opp_to_performance_state(genpd, opp);
	dev_pm_opp_put(opp);

unlock:
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
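
/*
 * Usage sketch (hypothetical consumer): translate the "required-opps" of an
 * OPP node into a performance state and request it for the device's domain.
 *
 *	unsigned int pstate;
 *
 *	pstate = of_genpd_opp_to_performance_state(dev, np);
 *	if (pstate)
 *		ret = dev_pm_genpd_set_performance_state(dev, pstate);
 */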

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int genpd_summary_one(struct seq_file *s,
			struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}
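
/*
 * Illustrative output of /sys/kernel/debug/pm_genpd/pm_genpd_summary (domain
 * and device names are hypothetical):
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	foo                             on              bar
 *	    /devices/platform/12350000.leaky-device            suspended
 *	bar                             off-0
 */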

static int genpd_status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GPD_STATE_POWER_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int genpd_sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->master_links, master_node)
		seq_printf(s, "%s\n", link->slave->name);

	genpd_unlock(genpd);
	return ret;
}

static int genpd_idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms)\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %lld\n", i, msecs);
	}

	genpd_unlock(genpd);
	return ret;
}

static int genpd_active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GPD_STATE_ACTIVE)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int genpd_total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}


static int genpd_devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

#define define_genpd_open_function(name) \
static int genpd_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, genpd_##name##_show, inode->i_private); \
}

define_genpd_open_function(summary);
define_genpd_open_function(status);
define_genpd_open_function(sub_domains);
define_genpd_open_function(idle_states);
define_genpd_open_function(active_time);
define_genpd_open_function(total_idle_time);
define_genpd_open_function(devices);

#define define_genpd_debugfs_fops(name) \
static const struct file_operations genpd_##name##_fops = { \
	.open = genpd_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

define_genpd_debugfs_fops(summary);
define_genpd_debugfs_fops(status);
define_genpd_debugfs_fops(sub_domains);
define_genpd_debugfs_fops(idle_states);
define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);

static int __init genpd_debug_init(void)
{
	struct dentry *d;
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			genpd_debugfs_dir, NULL, &genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
		if (!d)
			return -ENOMEM;

		debugfs_create_file("current_state", 0444,
				d, genpd, &genpd_status_fops);
		debugfs_create_file("sub_domains", 0444,
				d, genpd, &genpd_sub_domains_fops);
		debugfs_create_file("idle_states", 0444,
				d, genpd, &genpd_idle_states_fops);
		debugfs_create_file("active_time", 0444,
				d, genpd, &genpd_active_time_fops);
		debugfs_create_file("total_idle_time", 0444,
				d, genpd, &genpd_total_idle_time_fops);
		debugfs_create_file("devices", 0444,
				d, genpd, &genpd_devices_fops);
	}

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */