/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

14 15
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

V
Viresh Kumar 已提交
16
#include <linux/clk.h>
17 18 19
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
20
#include <linux/device.h>
21
#include <linux/export.h>
22
#include <linux/regulator/consumer.h>
23

24
#include "opp.h"
25 26

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
34

35 36
/*
 * Complain (via lockdep) if OPP table data is accessed while holding neither
 * rcu_read_lock() nor the opp_table_lock mutex.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)

43 44
static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
45
{
46
	struct opp_device *opp_dev;
47

48 49 50
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;
51 52 53 54

	return NULL;
}

55
/**
56 57
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
58
 *
59 60
 * Search OPP table for one containing matching device. Does a RCU reader
 * operation to grab the pointer needed.
61
 *
62
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
63 64
 * -EINVAL based on type of error.
 *
65
 * Locking: For readers, this function must be called under rcu_read_lock().
66
 * opp_table is a RCU protected pointer, which means that opp_table is valid
67 68
 * as long as we are under RCU lock.
 *
69
 * For Writers, this function must be called with opp_table_lock held.
70
 */
71
struct opp_table *_find_opp_table(struct device *dev)
72
{
73
	struct opp_table *opp_table;
74

75 76
	opp_rcu_lockdep_assert();

77
	if (IS_ERR_OR_NULL(dev)) {
78 79 80 81
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

82 83 84
	list_for_each_entry_rcu(opp_table, &opp_tables, node)
		if (_find_opp_dev(dev, opp_table))
			return opp_table;
85

86
	return ERR_PTR(-ENODEV);
87 88 89
}

/**
90
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
91 92
 * @opp:	opp for which voltage has to be returned for
 *
93
 * Return: voltage in micro volt corresponding to the opp, else
94 95
 * return 0
 *
96 97
 * This is useful only for devices with single power supply.
 *
98 99 100 101 102 103 104 105
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
106
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
107
{
108
	struct dev_pm_opp *tmp_opp;
109 110
	unsigned long v = 0;

111 112
	opp_rcu_lockdep_assert();

113
	tmp_opp = rcu_dereference(opp);
114
	if (IS_ERR_OR_NULL(tmp_opp))
115 116
		pr_err("%s: Invalid parameters\n", __func__);
	else
117
		v = tmp_opp->supplies[0].u_volt;
118 119 120

	return v;
}
121
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
122 123

/**
124
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
125 126
 * @opp:	opp for which frequency has to be returned for
 *
127
 * Return: frequency in hertz corresponding to the opp, else
128 129 130 131 132 133 134 135 136 137
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
138
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
139
{
140
	struct dev_pm_opp *tmp_opp;
141 142
	unsigned long f = 0;

143 144
	opp_rcu_lockdep_assert();

145
	tmp_opp = rcu_dereference(opp);
146
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
147 148 149 150 151 152
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
153
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
154

155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). @opp is a RCU
 * protected pointer and is only valid while the RCU read-side critical
 * section that produced it is still held, i.e. before rcu_read_unlock().
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *p;

	opp_rcu_lockdep_assert();

	p = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(p) || !p->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return p->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

189 190 191 192 193 194 195 196 197 198
/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
199
	struct opp_table *opp_table;
200 201 202 203
	unsigned long clock_latency_ns;

	rcu_read_lock();

204 205
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
206 207
		clock_latency_ns = 0;
	else
208
		clock_latency_ns = opp_table->clock_latency_ns_max;
209 210 211 212 213 214

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232
/* Number of regulators in @dev's OPP table; 0 when none or table not found. */
static int _get_regulator_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count = 0;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		count = opp_table->regulator_count;

	rcu_read_unlock();

	return count;
}

233 234 235 236 237 238 239 240 241 242
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
243
	struct opp_table *opp_table;
244
	struct dev_pm_opp *opp;
245
	struct regulator *reg, **regulators;
246
	unsigned long latency_ns = 0;
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	count = _get_regulator_count(dev);

	/* Regulator may not be required for the device */
	if (!count)
		return 0;

	regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
	if (!regulators)
		return 0;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto free_regulators;
266 267 268

	rcu_read_lock();

269 270
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
271
		rcu_read_unlock();
272
		goto free_uV;
273 274
	}

275
	memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
276

277 278 279
	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;
280

281 282 283 284 285 286 287 288 289
		list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
290 291 292 293 294
	}

	rcu_read_unlock();

	/*
295
	 * The caller needs to ensure that opp_table (and hence the regulator)
296 297
	 * isn't freed, while we are executing this routine.
	 */
298 299 300 301 302 303 304 305 306 307
	for (i = 0; reg = regulators[i], i < count; i++) {
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

free_uV:
	kfree(uV);
free_regulators:
	kfree(regulators);
308 309 310 311 312

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	/* Worst case: both the supply voltage and the clock must settle */
	return dev_pm_opp_get_max_volt_latency(dev) +
	       dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

330
/**
331
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
332 333
 * @dev:	device for which we do this operation
 *
334 335
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0;
336
 */
337
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
338
{
339
	struct opp_table *opp_table;
340
	unsigned long freq = 0;
341

342
	rcu_read_lock();
343

344 345 346
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
	    !opp_table->suspend_opp->available)
347 348 349
		goto unlock;

	freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
350

351 352 353
unlock:
	rcu_read_unlock();
	return freq;
354
}
355
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
356

357
/**
358
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
359 360
 * @dev:	device for which we do this operation
 *
361
 * Return: This function returns the number of available opps if there are any,
362 363
 * else returns 0 if none or the corresponding error value.
 *
364
 * Locking: This function takes rcu_read_lock().
365
 */
366
int dev_pm_opp_get_opp_count(struct device *dev)
367
{
368
	struct opp_table *opp_table;
369
	struct dev_pm_opp *temp_opp;
370 371
	int count = 0;

372
	rcu_read_lock();
373

374 375 376 377
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_err(dev, "%s: OPP table not found (%d)\n",
378 379
			__func__, count);
		goto out_unlock;
380 381
	}

382
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
383 384 385 386
		if (temp_opp->available)
			count++;
	}

387 388
out_unlock:
	rcu_read_unlock();
389 390
	return count;
}
391
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
392 393

/**
394
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
395 396
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
397
 * @available:		true/false - match for available opp
398
 *
399
 * Return: Searches for exact match in the opp table and returns pointer to the
400 401
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
402 403 404
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
405 406 407 408 409 410 411 412 413 414 415 416 417 418
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
419 420 421
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
422
{
423
	struct opp_table *opp_table;
424
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
425

426 427
	opp_rcu_lockdep_assert();

428 429 430 431 432
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
433 434 435
		return ERR_PTR(r);
	}

436
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
437 438 439 440 441 442 443 444 445
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
446
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
447

448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463
/*
 * Find the first available OPP with rate >= *freq and update *freq to the
 * rate that was chosen.
 */
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *iter;

	list_for_each_entry_rcu(iter, &opp_table->opp_list, node) {
		if (iter->available && iter->rate >= *freq) {
			*freq = iter->rate;
			return iter;
		}
	}

	return ERR_PTR(-ERANGE);
}

464
/**
465
 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
466 467 468 469 470 471
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
472
 * Return: matching *opp and refreshes *freq accordingly, else returns
473 474 475 476 477
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
478 479 480 481 482 483 484
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
485 486
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
487
{
488
	struct opp_table *opp_table;
489

490 491
	opp_rcu_lockdep_assert();

492 493 494 495 496
	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

497 498 499
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);
500

501
	return _find_freq_ceil(opp_table, freq);
502
}
503
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
504 505

/**
506
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
507 508 509 510 511 512
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
513
 * Return: matching *opp and refreshes *freq accordingly, else returns
514 515 516 517 518
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
519 520 521 522 523 524 525
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
526 527
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
528
{
529
	struct opp_table *opp_table;
530
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
531

532 533
	opp_rcu_lockdep_assert();

534 535 536 537 538
	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

539 540 541
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);
542

543
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
544 545 546 547 548 549 550 551 552 553 554 555 556
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
557
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
558

559
/*
560
 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
561 562 563 564
 * while clk returned here is used.
 */
static struct clk *_get_opp_clk(struct device *dev)
{
565
	struct opp_table *opp_table;
566 567 568 569
	struct clk *clk;

	rcu_read_lock();

570 571
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
572
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
573
		clk = ERR_CAST(opp_table);
574 575 576
		goto unlock;
	}

577
	clk = opp_table->clk;
578 579 580 581 582 583 584 585 586 587
	if (IS_ERR(clk))
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);

unlock:
	rcu_read_unlock();
	return clk;
}

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
588
			    struct dev_pm_opp_supply *supply)
589 590 591 592 593 594 595 596 597 598
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

599 600
	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
601

602 603
	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
604 605
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
606 607
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);
608 609 610 611

	return ret;
}

612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674
/*
 * Bare clock-rate change. @old_freq is accepted for call-site symmetry with
 * the voltage helpers but is not used here.
 */
static inline int
_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
			  unsigned long old_freq, unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret)
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);

	return ret;
}

/*
 * Default OPP switch sequence for a single-regulator device: raise voltage
 * before a frequency increase, lower it after a frequency decrease, and roll
 * back on failure.
 */
static int _generic_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
	struct regulator *reg = data->regulators[0];
	struct device *dev = data->dev;
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(data->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply->u_volt)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

675 676 677 678 679 680 681 682 683 684 685 686
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
687
	struct opp_table *opp_table;
688
	unsigned long freq, old_freq;
689
	int (*set_opp)(struct dev_pm_set_opp_data *data);
690
	struct dev_pm_opp *old_opp, *opp;
691 692
	struct regulator **regulators;
	struct dev_pm_set_opp_data *data;
693
	struct clk *clk;
694
	int ret, size;
695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

721 722
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
723 724
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
725
		return PTR_ERR(opp_table);
726 727
	}

728
	old_opp = _find_freq_ceil(opp_table, &old_freq);
729
	if (IS_ERR(old_opp)) {
730 731 732 733
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

734
	opp = _find_freq_ceil(opp_table, &freq);
735 736 737 738 739 740 741 742
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

743 744
	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);
745

746 747 748 749 750 751
	regulators = opp_table->regulators;

	/* Only frequency scaling */
	if (!regulators) {
		rcu_read_unlock();
		return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
752 753
	}

754 755 756 757 758
	if (opp_table->set_opp)
		set_opp = opp_table->set_opp;
	else
		set_opp = _generic_set_opp;

759 760 761 762 763 764 765 766
	data = opp_table->set_opp_data;
	data->regulators = regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*opp->supplies) * opp_table->regulator_count;
767
	if (IS_ERR(old_opp))
768
		memset(data->old_opp.supplies, 0, size);
769
	else
770
		memcpy(data->old_opp.supplies, old_opp->supplies, size);
771

772 773
	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, opp->supplies, size);
774 775 776

	rcu_read_unlock();

777
	return set_opp(data);
778 779 780
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);

781 782
/* OPP-dev Helpers */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
783
{
784
	struct opp_device *opp_dev;
785

786 787
	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
788 789
}

790 791
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
792
{
793 794 795 796
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
797 798
}

799 800
struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
801
{
802
	struct opp_device *opp_dev;
V
Viresh Kumar 已提交
803
	int ret;
804

805 806
	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
807 808
		return NULL;

809 810 811
	/* Initialize opp-dev */
	opp_dev->dev = dev;
	list_add_rcu(&opp_dev->node, &opp_table->dev_list);
812

813 814
	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
V
Viresh Kumar 已提交
815 816 817 818
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

819
	return opp_dev;
820 821
}

822
static struct opp_table *_allocate_opp_table(struct device *dev)
823
{
824 825
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
V
Viresh Kumar 已提交
826
	int ret;
827 828

	/*
829
	 * Allocate a new OPP table. In the infrequent case where a new
830 831
	 * device is needed to be added, we pay this penalty.
	 */
832 833
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
834 835
		return NULL;

836
	INIT_LIST_HEAD(&opp_table->dev_list);
837

838 839 840
	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
841 842 843
		return NULL;
	}

844
	_of_init_opp_table(opp_table, dev);
845

V
Viresh Kumar 已提交
846
	/* Find clk for the device */
847 848 849
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
V
Viresh Kumar 已提交
850 851 852 853 854
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

855 856
	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);
V
Viresh Kumar 已提交
857
	mutex_init(&opp_table->lock);
858
	kref_init(&opp_table->kref);
859

860 861 862
	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
863 864
}

865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table = _find_opp_table(dev);

	/* Reuse an existing table for 'dev' when there is one */
	if (IS_ERR(opp_table))
		opp_table = _allocate_opp_table(dev);

	return opp_table;
}

886
/**
887
 * _kfree_device_rcu() - Free opp_table RCU handler
V
Viresh Kumar 已提交
888
 * @head:	RCU head
889
 */
V
Viresh Kumar 已提交
890
static void _kfree_device_rcu(struct rcu_head *head)
891
{
892 893
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);
894

895
	kfree_rcu(opp_table, rcu_head);
896
}
897

898
void _get_opp_table_kref(struct opp_table *opp_table)
899
{
900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927
	kref_get(&opp_table->kref);
}

/*
 * Return @dev's OPP table with an extra reference held, allocating a fresh
 * table when none exists yet. May return NULL if allocation fails.
 */
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		opp_table = _allocate_opp_table(dev);
	else
		_get_opp_table_kref(opp_table);

	mutex_unlock(&opp_table_lock);

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

/* Final-reference teardown of an opp_table; caller handles any locking. */
static void _opp_table_kref_release_unlocked(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	mutex_destroy(&opp_table->lock);
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}

948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965
/* Drop a reference without touching opp_table_lock (caller already holds it). */
static void dev_pm_opp_put_opp_table_unlocked(struct opp_table *opp_table)
{
	kref_put(&opp_table->kref, _opp_table_kref_release_unlocked);
}

/* kref release callback invoked with opp_table_lock held; drops that lock. */
static void _opp_table_kref_release(struct kref *kref)
{
	_opp_table_kref_release_unlocked(kref);
	mutex_unlock(&opp_table_lock);
}

/* Drop a reference; takes opp_table_lock only when the last ref is released. */
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

966
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	dev_pm_opp_put_opp_table_unlocked(opp_table);
}
976

977
/* Counterpart to _opp_allocate(): release an OPP node. */
void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

982 983 984 985
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
986
static void _kfree_opp_rcu(struct rcu_head *head)
987 988 989 990 991 992
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

993 994
/**
 * _opp_remove()  - Remove an OPP from a table definition
995
 * @opp_table:	points back to the opp_table struct this opp belongs to
996 997
 * @opp:	pointer to the OPP to remove
 *
998
 * This function removes an opp definition from the opp table.
999
 *
1000
 * Locking: The internal opp_table and opp structures are RCU protected.
1001 1002 1003
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
1004
static void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp)
1005
{
V
Viresh Kumar 已提交
1006 1007
	mutex_lock(&opp_table->lock);

1008 1009 1010 1011
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
1012
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_REMOVE, opp);
V
Viresh Kumar 已提交
1013
	opp_debug_remove_one(opp);
1014
	list_del_rcu(&opp->node);
1015
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1016

V
Viresh Kumar 已提交
1017
	mutex_unlock(&opp_table->lock);
1018
	dev_pm_opp_put_opp_table(opp_table);
1019 1020 1021
}

/**
1022
 * dev_pm_opp_remove()  - Remove an OPP from OPP table
1023 1024 1025
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
1026
 * This function removes an opp from the opp table.
1027
 *
1028
 * Locking: The internal opp_table and opp structures are RCU protected.
1029 1030 1031 1032
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
1033 1034 1035 1036
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
1037
	struct opp_table *opp_table;
1038 1039
	bool found = false;

1040 1041
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);
1042

1043 1044
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
1045 1046
		goto unlock;

V
Viresh Kumar 已提交
1047 1048
	mutex_lock(&opp_table->lock);

1049
	list_for_each_entry(opp, &opp_table->opp_list, node) {
1050 1051 1052 1053 1054 1055
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

V
Viresh Kumar 已提交
1056 1057
	mutex_unlock(&opp_table->lock);

1058 1059 1060 1061 1062 1063
	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

1064
	_opp_remove(opp_table, opp);
1065
unlock:
1066
	mutex_unlock(&opp_table_lock);
1067 1068 1069
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

1070
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
1071
{
1072
	struct dev_pm_opp *opp;
1073
	int count, supply_size;
1074

1075 1076 1077
	/* Allocate space for at least one supply */
	count = table->regulator_count ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * count;
1078

1079 1080
	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
1081
	if (!opp)
1082 1083
		return NULL;

1084 1085 1086 1087
	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	INIT_LIST_HEAD(&opp->node);

1088 1089 1090
	return opp;
}

1091
static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1092
					 struct opp_table *opp_table)
1093
{
1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
	struct regulator *reg;
	int i;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
1108 1109 1110 1111 1112
	}

	return true;
}

1113 1114 1115 1116 1117 1118 1119 1120 1121 1122
/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
1123 1124
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
1125 1126
{
	struct dev_pm_opp *opp;
V
Viresh Kumar 已提交
1127
	struct list_head *head;
V
Viresh Kumar 已提交
1128
	int ret;
1129 1130 1131 1132 1133

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
1134
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
1135 1136 1137
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
V
Viresh Kumar 已提交
1138 1139 1140
	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

1141
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
1142 1143 1144 1145 1146 1147 1148 1149 1150
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
1151
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1152 1153 1154
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);
1155

1156
		/* Should we compare voltages for all regulators here ? */
V
Viresh Kumar 已提交
1157 1158 1159 1160 1161
		ret = opp->available &&
		      new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;

		mutex_unlock(&opp_table->lock);
		return ret;
1162 1163 1164
	}

	list_add_rcu(&new_opp->node, head);
V
Viresh Kumar 已提交
1165 1166 1167
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
1168

1169 1170 1171
	/* Get a reference to the OPP table */
	_get_opp_table_kref(opp_table);

1172
	ret = opp_debug_create_one(new_opp, opp_table);
V
Viresh Kumar 已提交
1173 1174 1175 1176
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

1177
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
1178 1179 1180 1181 1182
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

1183 1184 1185
	return 0;
}

1186
/**
1187
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1188
 * @opp_table:	OPP table
1189 1190 1191 1192 1193
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
1194
 * This function adds an opp definition to the opp table and returns status.
1195 1196 1197
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
1198 1199
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
1200
 *
1201
 * Locking: The internal opp_table and opp structures are RCU protected.
1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
1214 1215
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		unsigned long freq, long u_volt, bool dynamic)
1216
{
1217
	struct dev_pm_opp *new_opp;
1218
	unsigned long tol;
1219
	int ret;
1220

1221
	opp_rcu_lockdep_assert();
1222

1223 1224 1225
	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;
1226

1227 1228
	/* populate the opp table */
	new_opp->rate = freq;
1229
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1230 1231 1232
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
1233
	new_opp->available = true;
1234
	new_opp->dynamic = dynamic;
1235

1236
	ret = _opp_add(dev, new_opp, opp_table);
1237 1238 1239 1240
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
1241
		goto free_opp;
1242
	}
1243

1244 1245 1246 1247
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
1248
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1249
	return 0;
1250 1251

free_opp:
1252 1253
	_opp_free(new_opp);

1254
	return ret;
1255
}
1256

1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267
/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
1268 1269
struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
			const u32 *versions, unsigned int count)
1270
{
1271
	struct opp_table *opp_table;
1272
	int ret;
1273

1274 1275 1276
	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);
1277

1278 1279
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1280

1281 1282
	/* Do we already have a version hierarchy associated with opp_table? */
	if (opp_table->supported_hw) {
1283 1284 1285 1286 1287 1288
		dev_err(dev, "%s: Already have supported hardware list\n",
			__func__);
		ret = -EBUSY;
		goto err;
	}

1289
	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1290
					GFP_KERNEL);
1291
	if (!opp_table->supported_hw) {
1292 1293 1294 1295
		ret = -ENOMEM;
		goto err;
	}

1296
	opp_table->supported_hw_count = count;
1297 1298

	return opp_table;
1299 1300

err:
1301
	dev_pm_opp_put_opp_table(opp_table);
1302

1303
	return ERR_PTR(ret);
1304 1305 1306 1307 1308
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);

/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1309
 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
1310 1311
 *
 * This is required only for the V2 bindings, and is called for a matching
1312
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1313 1314
 * will not be freed.
 */
1315
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
1316
{
1317 1318
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1319

1320
	if (!opp_table->supported_hw) {
1321 1322 1323
		pr_err("%s: Doesn't have supported hardware list\n",
		       __func__);
		return;
1324 1325
	}

1326 1327 1328
	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;
1329

1330
	dev_pm_opp_put_opp_table(opp_table);
1331 1332 1333
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);

1334 1335
/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
V
Viresh Kumar 已提交
1336
 * @dev: Device for which the prop-name has to be set.
1337 1338 1339 1340 1341 1342 1343
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 */
1344
struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1345
{
1346
	struct opp_table *opp_table;
1347
	int ret;
1348

1349 1350 1351
	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);
1352

1353 1354
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1355

1356 1357
	/* Do we already have a prop-name associated with opp_table? */
	if (opp_table->prop_name) {
1358
		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1359
			opp_table->prop_name);
1360 1361 1362 1363
		ret = -EBUSY;
		goto err;
	}

1364 1365
	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
1366 1367 1368 1369
		ret = -ENOMEM;
		goto err;
	}

1370
	return opp_table;
1371 1372

err:
1373
	dev_pm_opp_put_opp_table(opp_table);
1374

1375
	return ERR_PTR(ret);
1376 1377 1378 1379 1380
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);

/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1381
 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
1382 1383
 *
 * This is required only for the V2 bindings, and is called for a matching
1384
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1385 1386
 * will not be freed.
 */
1387
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
1388
{
1389 1390
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1391

1392
	if (!opp_table->prop_name) {
1393 1394
		pr_err("%s: Doesn't have a prop-name\n", __func__);
		return;
1395 1396
	}

1397 1398
	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;
1399

1400
	dev_pm_opp_put_opp_table(opp_table);
1401 1402 1403
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);

1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435
/*
 * Allocate the dev_pm_set_opp_data block handed to custom set_opp()
 * helpers, with old/new supply arrays placed in the same allocation.
 * Returns 0 on success, -EINVAL without regulators, -ENOMEM on OOM.
 */
static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int count = opp_table->regulator_count;
	int len;

	if (WARN_ON(!count))
		return -EINVAL;

	/* set_opp_data itself plus old_opp.supplies and new_opp.supplies */
	len = sizeof(*data) +
	      2 * sizeof(struct dev_pm_opp_supply) * count;

	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Both supply arrays follow the structure in memory */
	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + count;

	opp_table->set_opp_data = data;

	return 0;
}

/* Release the block allocated by _allocate_set_opp_data() */
static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}

1436
/**
1437
 * dev_pm_opp_set_regulators() - Set regulator names for the device
1438
 * @dev: Device for which regulator name is being set.
1439 1440
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
1441 1442
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
1443 1444
 * device's regulators, as the core would be required to switch voltages as
 * well.
1445 1446 1447
 *
 * This must be called before any OPPs are initialized for the device.
 */
1448 1449 1450
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
1451
{
1452
	struct opp_table *opp_table;
1453
	struct regulator *reg;
1454
	int ret, i;
1455

1456 1457 1458
	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);
1459 1460

	/* This should be called before OPPs are initialized */
1461
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1462 1463 1464 1465
		ret = -EBUSY;
		goto err;
	}

1466
	/* Already have regulators set */
1467
	if (opp_table->regulators) {
1468 1469 1470
		ret = -EBUSY;
		goto err;
	}
1471 1472 1473 1474 1475 1476

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
1477 1478 1479
		goto err;
	}

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493
	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;
1494

1495 1496 1497 1498 1499
	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

1500
	return opp_table;
1501

1502 1503 1504 1505 1506 1507
free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
1508
	opp_table->regulator_count = 0;
1509
err:
1510
	dev_pm_opp_put_opp_table(opp_table);
1511

1512
	return ERR_PTR(ret);
1513
}
1514
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
1515 1516

/**
1517 1518
 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
1519
 */
1520
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
1521
{
1522 1523 1524 1525
	int i;

	if (!opp_table->regulators) {
		pr_err("%s: Doesn't have regulators set\n", __func__);
1526
		return;
1527 1528
	}

1529 1530
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1531

1532 1533 1534
	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

1535 1536
	_free_set_opp_data(opp_table);

1537 1538 1539
	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;
1540

1541
	dev_pm_opp_put_opp_table(opp_table);
1542
}
1543
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
1544

1545 1546 1547 1548 1549 1550 1551 1552 1553 1554
/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful to support complex platforms (like platforms with multiple
 * regulators per device), instead of the generic OPP set rate helper.
 *
 * This must be called before any OPPs are initialized for the device.
 */
1555
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
1556 1557 1558 1559 1560 1561
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *opp_table;
	int ret;

	if (!set_opp)
1562
		return ERR_PTR(-EINVAL);
1563

1564 1565 1566
	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);
1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have custom set_opp helper */
	if (WARN_ON(opp_table->set_opp)) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->set_opp = set_opp;

1582
	return opp_table;
1583 1584

err:
1585
	dev_pm_opp_put_opp_table(opp_table);
1586

1587
	return ERR_PTR(ret);
1588 1589 1590 1591 1592 1593
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);

/**
 * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
 *					   set_opp helper
1594
 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
1595
 *
1596
 * Release resources blocked for platform specific set_opp helper.
1597
 */
1598
void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
1599 1600
{
	if (!opp_table->set_opp) {
1601 1602 1603
		pr_err("%s: Doesn't have custom set_opp helper set\n",
		       __func__);
		return;
1604 1605 1606 1607 1608 1609 1610
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->set_opp = NULL;

1611
	dev_pm_opp_put_opp_table(opp_table);
1612 1613 1614
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);

1615 1616 1617 1618 1619 1620
/**
 * dev_pm_opp_add()  - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
1621
 * This function adds an opp definition to the opp table and returns status.
1622 1623 1624
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
1625
 * Locking: The internal opp_table and opp structures are RCU protected.
1626 1627 1628 1629 1630 1631
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
1632
 * 0		On success OR
1633
 *		Duplicate OPPs (both freq and volt are same) and opp->available
1634
 * -EEXIST	Freq are same and volt are different OR
1635
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
1636
 * -ENOMEM	Memory allocation failure
1637 1638 1639
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658
	struct opp_table *opp_table;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
	if (ret)
		_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
1659
}
1660
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1661 1662

/**
1663
 * _opp_set_availability() - helper to set the availability of an opp
1664 1665 1666 1667 1668 1669 1670
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
1671
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1672
 * copy operation, returns 0 if no modification was done OR modification was
1673 1674
 * successful.
 *
1675
 * Locking: The internal opp_table and opp structures are RCU protected.
1676 1677 1678 1679 1680
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
1681 1682
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
1683
{
1684
	struct opp_table *opp_table;
1685
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1686 1687 1688
	int r = 0;

	/* keep the node allocated */
1689
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1690
	if (!new_opp)
1691 1692
		return -ENOMEM;

1693
	mutex_lock(&opp_table_lock);
1694

1695 1696 1697 1698
	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
1699 1700 1701 1702
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

V
Viresh Kumar 已提交
1703 1704
	mutex_lock(&opp_table->lock);

1705
	/* Do we have the frequency? */
1706
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1707 1708 1709 1710 1711
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
V
Viresh Kumar 已提交
1712 1713 1714

	mutex_unlock(&opp_table->lock);

1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
1730 1731
	mutex_unlock(&opp_table_lock);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1732

1733 1734
	/* Notify the change of the OPP availability */
	if (availability_req)
1735 1736
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
1737
	else
1738 1739
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);
1740

V
Vincent Guittot 已提交
1741
	return 0;
1742 1743

unlock:
1744
	mutex_unlock(&opp_table_lock);
1745 1746 1747 1748 1749
	kfree(new_opp);
	return r;
}

/**
1750
 * dev_pm_opp_enable() - Enable a specific OPP
1751 1752 1753 1754 1755
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
1756
 * after being temporarily made unavailable with dev_pm_opp_disable.
1757
 *
1758
 * Locking: The internal opp_table and opp structures are RCU protected.
1759 1760 1761 1762
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1763 1764
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1765
 * copy operation, returns 0 if no modification was done OR modification was
1766
 * successful.
1767
 */
1768
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1769
{
1770
	return _opp_set_availability(dev, freq, true);
1771
}
1772
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1773 1774

/**
1775
 * dev_pm_opp_disable() - Disable a specific OPP
1776 1777 1778 1779 1780 1781
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
1782
 * right to make it available again (with a call to dev_pm_opp_enable).
1783
 *
1784
 * Locking: The internal opp_table and opp structures are RCU protected.
1785 1786 1787 1788
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1789 1790
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1791
 * copy operation, returns 0 if no modification was done OR modification was
1792
 * successful.
1793
 */
1794
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1795
{
1796
	return _opp_set_availability(dev, freq, false);
1797
}
1798
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1799

1800
/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev:	Device for which notifier needs to be registered
 * @nb:		Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	/* RCU read-side protection while looking up the table */
	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	ret = srcu_notifier_chain_register(&opp_table->srcu_head, nb);

unlock:
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev:	Device for which notifier needs to be unregistered
 * @nb:		Notifier block to be unregistered
1833
 *
1834
 * Return: 0 on success or a negative error value.
1835
 */
1836 1837
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
1838
{
1839 1840
	struct opp_table *opp_table;
	int ret;
1841

1842 1843 1844 1845 1846 1847 1848 1849 1850
	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	ret = srcu_notifier_chain_unregister(&opp_table->srcu_head, nb);
1851

1852 1853 1854 1855
unlock:
	rcu_read_unlock();

	return ret;
1856
}
1857
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
1858

1859 1860 1861
/*
 * Free OPPs either created using static entries present in DT or even the
 * dynamically added entries based on remove_all param.
1862
 */
1863 1864
void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
			      bool remove_all)
V
Viresh Kumar 已提交
1865 1866 1867
{
	struct dev_pm_opp *opp, *tmp;

1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885
	opp_rcu_lockdep_assert();

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (remove_all || !opp->dynamic)
				_opp_remove(opp_table, opp);
		}
	} else {
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}
}

/*
 * Look up the OPP table for @dev and remove its OPPs (all of them when
 * @remove_all, otherwise only the static ones). Missing tables other
 * than -ENODEV trigger a WARN.
 */
void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		/* -ENODEV simply means no table was ever created for @dev */
		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	_dev_pm_opp_remove_table(opp_table, dev, remove_all);

unlock:
	mutex_unlock(&opp_table_lock);
}
1907 1908

/**
1909
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
1910
 * @dev:	device pointer used to lookup OPP table.
1911
 *
1912 1913
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
1914
 *
1915
 * Locking: The internal opp_table and opp structures are RCU protected.
1916 1917 1918 1919
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
1920
 */
1921
void dev_pm_opp_remove_table(struct device *dev)
1922
{
1923
	_dev_pm_opp_find_and_remove_table(dev, true);
1924
}
1925
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);