/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})
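
/*
 * For illustration (a sketch, not part of the upstream file): a call such
 * as GENPD_DEV_CALLBACK(genpd, int, stop, dev) is a statement expression
 * that evaluates to genpd->dev_ops.stop(dev) when that callback is set,
 * and to (int)0 otherwise; genpd_stop_dev() below uses exactly this form.
 */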

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
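
/*
 * These wrappers dispatch through genpd->lock_ops, so callers need not
 * care whether a domain uses the mutex-based or the IRQ-safe
 * spinlock-based locking; that choice is made once per domain by
 * genpd_lock_init() further below, based on GENPD_FLAG_IRQ_SAFE.
 */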

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
	 * this indicates a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, so don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking, as the link->performance_state
	 * field is protected by the master genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (the subdomain's performance
	 * state requirement to the master domain) is different from
	 * link->slave->performance_state (the current performance state
	 * requirement of the devices/sub-domains of the subdomain) and so can
	 * have a different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *master;
	struct gpd_link *link;
	int master_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to masters of genpd */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		/* Find master's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 master->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		master_state = ret;

		genpd_lock_nested(master, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = master_state;
		master_state = _genpd_reeval_performance_state(master,
						master_state);
		ret = _genpd_set_performance_state(master, master_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(master);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
					     slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		genpd_lock_nested(master, depth + 1);

		master_state = link->prev_performance_state;
		link->performance_state = master_state;

		master_state = _genpd_reeval_performance_state(master,
						master_state);
		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       master->name, master_state);
		}

		genpd_unlock(master);
	}

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set to 0 when
 *	   the device doesn't have any performance state constraints left (and
 *	   so the device no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is running.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (unlikely(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data)) {
		WARN_ON(1);
		return -EINVAL;
	}

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
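
/*
 * Illustrative sketch (hypothetical device and state value, not taken
 * from this file): a consumer votes for a performance state and later
 * passes 0 to drop its vote again:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */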

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}
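
/*
 * Both walkers above follow the usual dev_pm_ops precedence: the device
 * type's ops are consulted first, then the class's, then the bus's, and
 * the driver's own ops are used only as a fallback when no subsystem
 * provides a callback.
 */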

/**
 * genpd_runtime_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ-safe domain that holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}
	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
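
/*
 * Booting with "pd_ignore_unused" on the kernel command line therefore
 * keeps otherwise-unused PM domains powered on; see the check in
 * genpd_power_off_unused() below.
 */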

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}
	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain is in the same power state it was in before the
 * hibernation the system is resuming from, and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);
	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}
	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
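
/*
 * Illustrative sketch (hypothetical domain and device, not taken from
 * this file): a platform typically attaches devices while setting up:
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add device to PM domain\n");
 */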
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
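
/*
 * Illustrative sketch (hypothetical domains): making "child_pd" a
 * subdomain of "parent_pd", so that powering on the child keeps the
 * parent powered as well:
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 */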

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
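
/*
 * Illustrative sketch (the domain and callbacks are hypothetical, not
 * taken from this file): a provider fills in its power_on/power_off
 * handlers and registers the domain as initially powered off:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */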
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(!genpd->opp_table);
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
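
/*
 * Minimal usage sketch for of_genpd_add_provider_simple(), assuming a
 * hypothetical platform driver; "foo_pd", "foo_power_on" and
 * "foo_power_off" are made-up names for the example:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_power_on,
 *		.power_off = foo_power_off,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = pm_genpd_init(&foo_pd, NULL, true);
 *
 *		if (ret)
 *			return ret;
 *		return of_genpd_add_provider_simple(pdev->dev.of_node,
 *						    &foo_pd);
 *	}
 */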

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(!genpd->opp_table);
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
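
/*
 * Minimal usage sketch for of_genpd_add_provider_onecell(), assuming two
 * hypothetical domains already set up with pm_genpd_init(); when
 * data->xlate is left NULL, the default genpd_xlate_onecell() translation
 * is used:
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		&foo_pd_a,
 *		&foo_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_pd_data);
 */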

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
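
/*
 * Illustrative sketch (all names made up): a SoC driver that knows the
 * provider nodes of two of its domains could nest one under the other:
 *
 *	struct of_phandle_args parent_spec = { .np = parent_np, .args_count = 0 };
 *	struct of_phandle_args child_spec = { .np = child_np, .args_count = 0 };
 *	int ret;
 *
 *	ret = of_genpd_add_subdomain(&parent_spec, &child_spec);
 *	if (ret)
 *		dev_err(dev, "failed to add subdomain: %d\n", ret);
 */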

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to the device node associated with the provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the @np device node that is passed. The PM domain will
 * only be removed if the provider associated with the domain has been
 * removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(np, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device doesn't need
 * a PM domain or when multiple power-domains exist for it, else a negative
 * error code. Note that if a power-domain exists for the device, but it cannot
 * be found or turned on, then -EPROBE_DEFER is returned to ensure that the
 * device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
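
/*
 * Illustrative consumer binding (node name and compatible made up): a
 * device with a single "power-domains" entry ends up here via the bus
 * code calling dev_pm_domain_attach() at probe time:
 *
 *	leaky-device@12350000 {
 *		compatible = "foo,i-leak-current";
 *		reg = <0x12350000 0x1000>;
 *		power-domains = <&pd>;
 *	};
 */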

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided @index.
 * If such is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
 *
 * Returns the created virtual device if successfully attached PM domain, NULL
 * when the device doesn't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
 * is not probed and to re-try again later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Deal only with devices using multiple PM domains. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (num_domains < 2 || index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;

	ret = device_register(virt_dev);
	if (ret) {
		kfree(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}
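
/*
 * Illustrative sketch (node, labels and names made up): a consumer with
 * multiple PM domains can attach to one of them by name,
 *
 *	codec@12345678 {
 *		...
 *		power-domains = <&pds 0>, <&pds 1>;
 *		power-domain-names = "core", "mem";
 *	};
 *
 * with the driver then calling, e.g.:
 *
 *	struct device *pd_dev;
 *
 *	pd_dev = genpd_dev_pm_attach_by_name(dev, "mem");
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);	// NULL means no PM domain needed
 */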

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
				    struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
						&entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
						state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
						&exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
						state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}
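
/*
 * Illustrative example of a node accepted by genpd_parse_state() (values
 * made up; the latencies and residency are given in microseconds and are
 * converted to nanoseconds above):
 *
 *	domain_ret: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <80>;
 *	};
 */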

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the responsibility of the caller to
 * free it after use. If any or zero compatible domain idle states are found,
 * it returns 0; in case of errors, a negative error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
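
/*
 * Minimal usage sketch, assuming a hypothetical provider driver ("foo_pd"
 * is made up): the parsed states are handed to the genpd before
 * pm_genpd_init(), and the array is owned (and eventually freed) by the
 * caller:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */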

/**
 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
 *
 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
 *	state.
 *
 * Returns performance state encoded in the OPP of the genpd. This calls
 * platform specific genpd->opp_to_performance_state() callback to translate
 * power domain OPP to performance state.
 *
 * Returns performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd = NULL;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int genpd_summary_one(struct seq_file *s,
			struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GPD_STATE_POWER_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->master_links, master_node)
		seq_printf(s, "%s\n", link->slave->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms)\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %lld\n", i, msecs);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GPD_STATE_ACTIVE)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}


static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static int __init genpd_debug_init(void)
{
	struct dentry *d;
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

		debugfs_create_file("current_state", 0444,
				d, genpd, &status_fops);
		debugfs_create_file("sub_domains", 0444,
				d, genpd, &sub_domains_fops);
		debugfs_create_file("idle_states", 0444,
				d, genpd, &idle_states_fops);
		debugfs_create_file("active_time", 0444,
				d, genpd, &active_time_fops);
		debugfs_create_file("total_idle_time", 0444,
				d, genpd, &total_idle_time_fops);
		debugfs_create_file("devices", 0444,
				d, genpd, &devices_fops);
		if (genpd->set_performance_state)
			debugfs_create_file("perf_state", 0444,
					    d, genpd, &perf_state_fops);
	}

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */