/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
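
/*
 * GENPD_DEV_CALLBACK() invokes an optional per-device callback from the
 * domain's dev_ops, returning zero of the requested type if the callback
 * is not set.  GENPD_DEV_TIMED_CALLBACK() additionally measures how long
 * the callback took and, on success, records a new worst-case latency in
 * the device's timing data and flags the change for the domain governor.
 */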

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

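/*
 * Look up an initialized generic PM domain by name.  Returns NULL if no
 * domain with the given name has been registered.
 */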
static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpu_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpu_data->saved_exit_latency;
	genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpu_data) {
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

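/*
 * PM QoS notifier for devices in generic PM domains.  When a device's PM
 * QoS constraints change, mark the device's timing data, and that of each
 * ancestor whose power state depends on it, as changed so that the
 * governors of the affected domains recompute their maximum off times.
 */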
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	bool need_restore = gpd_data->need_restore;

	gpd_data->need_restore = false;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occured in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpu_data) {
		/*
		 * If cpu_data is set, cpuidle should turn the domain off when
		 * the CPU in it is idle.  In that case we don't decrement the
		 * subdomain counts of the master domains, so that power is not
		 * removed from the current domain prematurely as a result of
		 * cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

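/*
 * Allocate per-device domain data and register its PM QoS notifier;
 * __pm_genpd_free_dev_data() undoes both.
 */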
static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing a PM domain to which the
 *   device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}


/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpu_data) {
		ret = -EEXIST;
		goto out;
	}
	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpu_data->idle_state = idle_state;
	cpu_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpu_data = cpu_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpu_data);
	goto out;
}

/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpu_data = genpd->cpu_data;
	if (!cpu_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpu_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpu_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpu_data = NULL;
	kfree(cpu_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
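
/*
 * Illustrative sketch (not part of this file): how platform code might set
 * up a domain with the interfaces above.  The names my_pd, my_pd_power_on,
 * my_pd_power_off and my_pdev are hypothetical placeholders.
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		... assert power to the hardware island here ...
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		... remove power from the hardware island here ...
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name		= "my_pd",
 *		.power_on	= my_pd_power_on,
 *		.power_off	= my_pd_power_off,
 *	};
 *
 *	static int __init my_pd_setup(void)
 *	{
 *		pm_genpd_init(&my_pd, NULL, true);
 *		return __pm_genpd_add_device(&my_pd, &my_pdev->dev, NULL);
 *	}
 *
 * Drivers for devices in the domain then use runtime PM as usual; the
 * domain is powered down through pm_genpd_poweroff() once its last device
 * has been runtime-suspended and powered up again on resume.
 */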