core.c 54.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

14 15
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

V
Viresh Kumar 已提交
16
#include <linux/clk.h>
17 18 19
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
20
#include <linux/device.h>
21
#include <linux/export.h>
22
#include <linux/regulator/consumer.h>
23

24
#include "opp.h"
25 26

/*
27 28
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
29 30
 * various states of availability.
 */
31
LIST_HEAD(opp_tables);
32
/* Lock to allow exclusive modification to the device and opp lists */
33
DEFINE_MUTEX(opp_table_lock);
34

35 36
/*
 * Assert that the caller holds either the RCU read lock or the global
 * opp_table_lock; every walker of the opp_tables / opp_list structures
 * must run under one of the two.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)

43 44
static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
45
{
46
	struct opp_device *opp_dev;
47

48 49 50
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;
51 52 53 54

	return NULL;
}

55
/**
56 57
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
58
 *
59 60
 * Search OPP table for one containing matching device. Does a RCU reader
 * operation to grab the pointer needed.
61
 *
62
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
63 64
 * -EINVAL based on type of error.
 *
65
 * Locking: For readers, this function must be called under rcu_read_lock().
66
 * opp_table is a RCU protected pointer, which means that opp_table is valid
67 68
 * as long as we are under RCU lock.
 *
69
 * For Writers, this function must be called with opp_table_lock held.
70
 */
71
struct opp_table *_find_opp_table(struct device *dev)
72
{
73
	struct opp_table *opp_table;
74

75 76
	opp_rcu_lockdep_assert();

77
	if (IS_ERR_OR_NULL(dev)) {
78 79 80 81
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

82 83 84
	list_for_each_entry_rcu(opp_table, &opp_tables, node)
		if (_find_opp_dev(dev, opp_table))
			return opp_table;
85

86
	return ERR_PTR(-ENODEV);
87 88 89
}

/**
90
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
91 92
 * @opp:	opp for which voltage has to be returned for
 *
93
 * Return: voltage in micro volt corresponding to the opp, else
94 95
 * return 0
 *
96 97
 * This is useful only for devices with single power supply.
 *
98 99 100 101 102 103 104 105
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
106
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
107
{
108
	struct dev_pm_opp *tmp_opp;
109 110
	unsigned long v = 0;

111 112
	opp_rcu_lockdep_assert();

113
	tmp_opp = rcu_dereference(opp);
114
	if (IS_ERR_OR_NULL(tmp_opp))
115 116
		pr_err("%s: Invalid parameters\n", __func__);
	else
117
		v = tmp_opp->supplies[0].u_volt;
118 119 120

	return v;
}
121
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
122 123

/**
124
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
125 126
 * @opp:	opp for which frequency has to be returned for
 *
127
 * Return: frequency in hertz corresponding to the opp, else
128 129 130 131 132 133 134 135 136 137
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
138
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
139
{
140
	struct dev_pm_opp *tmp_opp;
141 142
	unsigned long f = 0;

143 144
	opp_rcu_lockdep_assert();

145
	tmp_opp = rcu_dereference(opp);
146
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
147 148 149 150 151 152
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
153
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
154

155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). @opp, as
 * returned by the opp_find_freq_{exact,ceil,floor} helpers, is RCU
 * protected and must only be used before rcu_read_unlock().
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (!IS_ERR_OR_NULL(tmp_opp) && tmp_opp->available)
		return tmp_opp->turbo;

	pr_err("%s: Invalid parameters\n", __func__);
	return false;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

189 190 191 192 193 194 195 196 197 198
/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
199
	struct opp_table *opp_table;
200 201 202 203
	unsigned long clock_latency_ns;

	rcu_read_lock();

204 205
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
206 207
		clock_latency_ns = 0;
	else
208
		clock_latency_ns = opp_table->clock_latency_ns_max;
209 210 211 212 213 214

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232
/* Number of regulators attached to @dev's OPP table; 0 if no table found. */
static int _get_regulator_count(struct device *dev)
{
	struct opp_table *table;
	int nr = 0;

	rcu_read_lock();

	table = _find_opp_table(dev);
	if (!IS_ERR(table))
		nr = table->regulator_count;

	rcu_read_unlock();

	return nr;
}

233 234 235 236 237 238 239 240 241 242
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
243
	struct opp_table *opp_table;
244
	struct dev_pm_opp *opp;
245
	struct regulator *reg, **regulators;
246
	unsigned long latency_ns = 0;
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	count = _get_regulator_count(dev);

	/* Regulator may not be required for the device */
	if (!count)
		return 0;

	regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
	if (!regulators)
		return 0;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto free_regulators;
266 267 268

	rcu_read_lock();

269 270
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
271
		rcu_read_unlock();
272
		goto free_uV;
273 274
	}

275
	memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
276

277 278 279
	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;
280

281 282 283 284 285 286 287 288 289
		list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
290 291 292 293 294
	}

	rcu_read_unlock();

	/*
295
	 * The caller needs to ensure that opp_table (and hence the regulator)
296 297
	 * isn't freed, while we are executing this routine.
	 */
298 299 300 301 302 303 304 305 306 307
	for (i = 0; reg = regulators[i], i < count; i++) {
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

free_uV:
	kfree(uV);
free_regulators:
	kfree(regulators);
308 309 310 311 312

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	unsigned long volt_latency = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long clock_latency = dev_pm_opp_get_max_clock_latency(dev);

	return volt_latency + clock_latency;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

330 331 332 333 334
/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev:	device for which we do this operation
 *
 * Return: This function returns pointer to the suspend opp if it is
335
 * defined and available, otherwise it returns NULL.
336 337 338 339 340 341 342 343 344
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
345
	struct opp_table *opp_table;
346 347 348

	opp_rcu_lockdep_assert();

349 350 351
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
	    !opp_table->suspend_opp->available)
352
		return NULL;
353

354
	return opp_table->suspend_opp;
355 356 357
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);

358
/**
359
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
360 361
 * @dev:	device for which we do this operation
 *
362
 * Return: This function returns the number of available opps if there are any,
363 364
 * else returns 0 if none or the corresponding error value.
 *
365
 * Locking: This function takes rcu_read_lock().
366
 */
367
int dev_pm_opp_get_opp_count(struct device *dev)
368
{
369
	struct opp_table *opp_table;
370
	struct dev_pm_opp *temp_opp;
371 372
	int count = 0;

373
	rcu_read_lock();
374

375 376 377 378
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_err(dev, "%s: OPP table not found (%d)\n",
379 380
			__func__, count);
		goto out_unlock;
381 382
	}

383
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
384 385 386 387
		if (temp_opp->available)
			count++;
	}

388 389
out_unlock:
	rcu_read_unlock();
390 391
	return count;
}
392
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
393 394

/**
395
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
396 397
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
398
 * @available:		true/false - match for available opp
399
 *
400
 * Return: Searches for exact match in the opp table and returns pointer to the
401 402
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
403 404 405
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
406 407 408 409 410 411 412 413 414 415 416 417 418 419
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
420 421 422
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
423
{
424
	struct opp_table *opp_table;
425
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
426

427 428
	opp_rcu_lockdep_assert();

429 430 431 432 433
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
434 435 436
		return ERR_PTR(r);
	}

437
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
438 439 440 441 442 443 444 445 446
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
447
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
448

449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
/*
 * Return the first available OPP with rate >= *freq and update *freq to
 * that rate; ERR_PTR(-ERANGE) when no such OPP exists.
 */
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *iter;

	list_for_each_entry_rcu(iter, &opp_table->opp_list, node) {
		if (!iter->available || iter->rate < *freq)
			continue;

		*freq = iter->rate;
		return iter;
	}

	return ERR_PTR(-ERANGE);
}

465
/**
466
 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
467 468 469 470 471 472
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
473
 * Return: matching *opp and refreshes *freq accordingly, else returns
474 475 476 477 478
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
479 480 481 482 483 484 485
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
486 487
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
488
{
489
	struct opp_table *opp_table;
490

491 492
	opp_rcu_lockdep_assert();

493 494 495 496 497
	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

498 499 500
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);
501

502
	return _find_freq_ceil(opp_table, freq);
503
}
504
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
505 506

/**
507
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
508 509 510 511 512 513
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
514
 * Return: matching *opp and refreshes *freq accordingly, else returns
515 516 517 518 519
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
520 521 522 523 524 525 526
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
527 528
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
529
{
530
	struct opp_table *opp_table;
531
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
532

533 534
	opp_rcu_lockdep_assert();

535 536 537 538 539
	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

540 541 542
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);
543

544
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
545 546 547 548 549 550 551 552 553 554 555 556 557
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
558
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
559

560
/*
561
 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
562 563 564 565
 * while clk returned here is used.
 */
static struct clk *_get_opp_clk(struct device *dev)
{
566
	struct opp_table *opp_table;
567 568 569 570
	struct clk *clk;

	rcu_read_lock();

571 572
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
573
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
574
		clk = ERR_CAST(opp_table);
575 576 577
		goto unlock;
	}

578
	clk = opp_table->clk;
579 580 581 582 583 584 585 586 587 588
	if (IS_ERR(clk))
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);

unlock:
	rcu_read_unlock();
	return clk;
}

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
589
			    struct dev_pm_opp_supply *supply)
590 591 592 593 594 595 596 597 598 599
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

600 601
	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
602

603 604
	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
605 606
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
607 608
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);
609 610 611 612

	return ret;
}

613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675
/* Set only the clock rate; old_freq is unused here but kept for symmetry. */
static inline int
_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
			  unsigned long old_freq, unsigned long freq)
{
	int err = clk_set_rate(clk, freq);

	if (err)
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			err);

	return err;
}

/*
 * Default OPP transition handler (single regulator only).
 *
 * Orders voltage vs. frequency changes so the device never runs at a
 * frequency its current voltage cannot sustain: raise voltage before the
 * clock when scaling up, lower it after the clock when scaling down.
 * On failure, both the clock rate and the voltage are rolled back to the
 * old OPP's values.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int _generic_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
	struct regulator *reg = data->regulators[0];
	struct device *dev = data->dev;
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(data->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply->u_volt)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

676 677 678 679 680 681 682 683 684 685 686 687
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Return: 0 on success (including "nothing to do"), negative errno otherwise.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	int (*set_opp)(struct dev_pm_set_opp_data *data);
	struct dev_pm_opp *old_opp, *opp;
	struct regulator **regulators;
	struct dev_pm_set_opp_data *data;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Let the clk framework round to a supported rate; fall back on error */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Failure to find the current OPP is not fatal: old supplies are
	 * zeroed below in that case.
	 */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	regulators = opp_table->regulators;

	/* Only frequency scaling */
	if (!regulators) {
		rcu_read_unlock();
		return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	}

	/* Use the platform-specific transition handler when one is set */
	if (opp_table->set_opp)
		set_opp = opp_table->set_opp;
	else
		set_opp = _generic_set_opp;

	/* Snapshot everything the handler needs while still under RCU */
	data = opp_table->set_opp_data;
	data->regulators = regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*opp->supplies) * opp_table->regulator_count;
	if (IS_ERR(old_opp))
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_opp->supplies, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, opp->supplies, size);

	rcu_read_unlock();

	/* The handler runs outside the RCU read section, on copied data */
	return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);

782 783
/* OPP-dev Helpers */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
784
{
785
	struct opp_device *opp_dev;
786

787 788
	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
789 790
}

791 792
/*
 * Unlink @opp_dev from @opp_table's dev_list and free it only after an SRCU
 * grace period, so concurrent readers walking dev_list remain safe. Debugfs
 * entries are torn down first, while the structure is still fully valid.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}

800 801
struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
802
{
803
	struct opp_device *opp_dev;
V
Viresh Kumar 已提交
804
	int ret;
805

806 807
	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
808 809
		return NULL;

810 811 812
	/* Initialize opp-dev */
	opp_dev->dev = dev;
	list_add_rcu(&opp_dev->node, &opp_table->dev_list);
813

814 815
	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
V
Viresh Kumar 已提交
816 817 818 819
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

820
	return opp_dev;
821 822
}

823
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 *
 * The table is fully initialized before being published on the global
 * opp_tables list via list_add_rcu(), so RCU readers never observe a
 * half-constructed table.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		/* Not yet on the global list, so a plain kfree() is safe */
		kfree(opp_table);
		return NULL;
	}

	/* OF-specific initialization — presumably device-tree properties */
	_of_init_opp_table(opp_table, dev);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}

878
/**
879
 * _kfree_device_rcu() - Free opp_table RCU handler
V
Viresh Kumar 已提交
880
 * @head:	RCU head
881
 */
V
Viresh Kumar 已提交
882
static void _kfree_device_rcu(struct rcu_head *head)
883
{
884 885
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);
886

887
	kfree_rcu(opp_table, rcu_head);
888
}
889 890

/**
891 892
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
893
 *
894
 * Removes/frees OPP table if it doesn't contain any OPPs.
895
 */
896
static void _remove_opp_table(struct opp_table *opp_table)
897
{
898
	struct opp_device *opp_dev;
899

900
	if (!list_empty(&opp_table->opp_list))
901 902
		return;

903
	if (opp_table->supported_hw)
904 905
		return;

906
	if (opp_table->prop_name)
907 908
		return;

909
	if (opp_table->regulators)
910 911
		return;

912 913 914
	if (opp_table->set_opp)
		return;

V
Viresh Kumar 已提交
915
	/* Release clk */
916 917
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);
V
Viresh Kumar 已提交
918

919 920
	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);
921

922
	_remove_opp_dev(opp_dev, opp_table);
923 924

	/* dev_list must be empty now */
925
	WARN_ON(!list_empty(&opp_table->dev_list));
926

927 928
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
929
		  _kfree_device_rcu);
930
}
931

932 933 934 935 936 937
/*
 * Free a not-yet-published OPP (safe to kfree() directly since it was never
 * added to opp_list) and drop @opp_table if nothing else uses it.
 */
void _opp_free(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
	kfree(opp);
	_remove_opp_table(opp_table);
}

938 939 940 941
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
942
static void _kfree_opp_rcu(struct rcu_head *head)
943 944 945 946 947 948
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

949 950
/**
 * _opp_remove()  - Remove an OPP from a table definition
 * @opp_table:	points back to the opp_table struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 *
 * This function removes an opp definition from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	/* Unlink first; the actual free waits for an SRCU grace period */
	list_del_rcu(&opp->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP */
	_remove_opp_table(opp_table);
}

/**
975
 * dev_pm_opp_remove()  - Remove an OPP from OPP table
976 977 978
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
979
 * This function removes an opp from the opp table.
980
 *
981
 * Locking: The internal opp_table and opp structures are RCU protected.
982 983 984 985
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
986 987 988 989
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
990
	struct opp_table *opp_table;
991 992
	bool found = false;

993 994
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);
995

996 997
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
998 999
		goto unlock;

1000
	list_for_each_entry(opp, &opp_table->opp_list, node) {
1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

1013
	_opp_remove(opp_table, opp);
1014
unlock:
1015
	mutex_unlock(&opp_table_lock);
1016 1017 1018
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

1019
struct dev_pm_opp *_opp_allocate(struct device *dev,
1020
				 struct opp_table **opp_table)
1021
{
1022
	struct dev_pm_opp *opp;
1023 1024
	int count, supply_size;
	struct opp_table *table;
1025

1026 1027
	table = _add_opp_table(dev);
	if (!table)
1028
		return NULL;
1029

1030 1031 1032
	/* Allocate space for at least one supply */
	count = table->regulator_count ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * count;
1033

1034 1035 1036 1037
	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
	if (!opp) {
		kfree(table);
1038 1039 1040
		return NULL;
	}

1041 1042 1043 1044 1045 1046
	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	INIT_LIST_HEAD(&opp->node);

	*opp_table = table;

1047 1048 1049
	return opp;
}

1050
static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1051
					 struct opp_table *opp_table)
1052
{
1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066
	struct regulator *reg;
	int i;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
1067 1068 1069 1070 1071
	}

	return true;
}

1072 1073 1074 1075 1076 1077 1078 1079 1080 1081
/*
 * Insert @new_opp into @opp_table's frequency-sorted OPP list, register it
 * with debugfs and validate it against the table's regulators.
 *
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 *
 * Must be called with opp_table_lock held.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Track the last node with a lower frequency */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	/* Publish the OPP; concurrent RCU readers may see it immediately */
	list_add_rcu(&new_opp->node, head);

	/* A debugfs failure is logged but deliberately not treated as fatal */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		/* Keep the OPP in the list but never select it */
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}

1134
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _opp_allocate(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	/* v1 bindings derive the min/max window from a percentage tolerance */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		/* Duplicate or failed insert: release the unused node */
		goto free_opp;
	}

	/* Drop the lock before notifying; listeners may block or re-enter */
	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp, opp_table);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
1210

1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221
/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 *
1222
 * Locking: The internal opp_table and opp structures are RCU protected.
1223 1224 1225 1226 1227 1228 1229 1230
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
				unsigned int count)
{
1231
	struct opp_table *opp_table;
1232 1233
	int ret = 0;

1234 1235
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);
1236

1237 1238
	opp_table = _add_opp_table(dev);
	if (!opp_table) {
1239 1240 1241 1242
		ret = -ENOMEM;
		goto unlock;
	}

1243 1244
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1245

1246 1247
	/* Do we already have a version hierarchy associated with opp_table? */
	if (opp_table->supported_hw) {
1248 1249 1250 1251 1252 1253
		dev_err(dev, "%s: Already have supported hardware list\n",
			__func__);
		ret = -EBUSY;
		goto err;
	}

1254
	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1255
					GFP_KERNEL);
1256
	if (!opp_table->supported_hw) {
1257 1258 1259 1260
		ret = -ENOMEM;
		goto err;
	}

1261 1262
	opp_table->supported_hw_count = count;
	mutex_unlock(&opp_table_lock);
1263 1264 1265
	return 0;

err:
1266
	_remove_opp_table(opp_table);
1267
unlock:
1268
	mutex_unlock(&opp_table_lock);
1269 1270 1271 1272 1273 1274 1275

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);

/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
V
Viresh Kumar 已提交
1276
 * @dev: Device for which supported-hw has to be put.
1277 1278
 *
 * This is required only for the V2 bindings, and is called for a matching
1279
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1280 1281
 * will not be freed.
 *
1282
 * Locking: The internal opp_table and opp structures are RCU protected.
1283 1284 1285 1286 1287 1288 1289
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_put_supported_hw(struct device *dev)
{
1290
	struct opp_table *opp_table;
1291

1292 1293
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);
1294

1295 1296 1297 1298 1299
	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
1300 1301 1302
		goto unlock;
	}

1303 1304
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1305

1306
	if (!opp_table->supported_hw) {
1307 1308 1309 1310 1311
		dev_err(dev, "%s: Doesn't have supported hardware list\n",
			__func__);
		goto unlock;
	}

1312 1313 1314
	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;
1315

1316 1317
	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);
1318 1319

unlock:
1320
	mutex_unlock(&opp_table_lock);
1321 1322 1323
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);

1324 1325
/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
V
Viresh Kumar 已提交
1326
 * @dev: Device for which the prop-name has to be set.
1327 1328 1329 1330 1331 1332 1333
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 *
1334
 * Locking: The internal opp_table and opp structures are RCU protected.
1335 1336 1337 1338 1339 1340 1341
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
1342
	struct opp_table *opp_table;
1343 1344
	int ret = 0;

1345 1346
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);
1347

1348 1349
	opp_table = _add_opp_table(dev);
	if (!opp_table) {
1350 1351 1352 1353
		ret = -ENOMEM;
		goto unlock;
	}

1354 1355
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1356

1357 1358
	/* Do we already have a prop-name associated with opp_table? */
	if (opp_table->prop_name) {
1359
		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1360
			opp_table->prop_name);
1361 1362 1363 1364
		ret = -EBUSY;
		goto err;
	}

1365 1366
	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
1367 1368 1369 1370
		ret = -ENOMEM;
		goto err;
	}

1371
	mutex_unlock(&opp_table_lock);
1372 1373 1374
	return 0;

err:
1375
	_remove_opp_table(opp_table);
1376
unlock:
1377
	mutex_unlock(&opp_table_lock);
1378 1379 1380 1381 1382 1383 1384

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);

/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
V
Viresh Kumar 已提交
1385
 * @dev: Device for which the prop-name has to be put.
1386 1387
 *
 * This is required only for the V2 bindings, and is called for a matching
1388
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1389 1390
 * will not be freed.
 *
1391
 * Locking: The internal opp_table and opp structures are RCU protected.
1392 1393 1394 1395 1396 1397 1398
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_put_prop_name(struct device *dev)
{
1399
	struct opp_table *opp_table;
1400

1401 1402
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);
1403

1404 1405 1406 1407 1408
	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
1409 1410 1411
		goto unlock;
	}

1412 1413
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1414

1415
	if (!opp_table->prop_name) {
1416 1417 1418 1419
		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
		goto unlock;
	}

1420 1421
	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;
1422

1423 1424
	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);
1425 1426

unlock:
1427
	mutex_unlock(&opp_table_lock);
1428 1429 1430
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);

1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462
/*
 * Allocate the single scratch block that is handed to the custom set_opp()
 * helper: one dev_pm_set_opp_data header followed by two supply arrays
 * (old_opp and new_opp), sized by the table's regulator count.
 */
static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int nregs = opp_table->regulator_count;
	int size;

	/* Regulators must have been registered first */
	if (WARN_ON(!nregs))
		return -EINVAL;

	/* Header plus two trailing supply arrays */
	size = sizeof(*data) + 2 * sizeof(struct dev_pm_opp_supply) * nregs;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Carve both supply arrays out of the tail of the allocation */
	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + nregs;

	opp_table->set_opp_data = data;

	return 0;
}

/* Release the scratch block allocated by _allocate_set_opp_data(). */
static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}

1463
/**
 * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator name is being set.
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return: the device's opp_table on success, an ERR_PTR() otherwise.
 */
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret, i;

	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have regulators set */
	if (opp_table->regulators) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			/* -EPROBE_DEFER is expected and not worth a log line */
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

	mutex_unlock(&opp_table_lock);
	return opp_table;

free_regulators:
	/* Put back only the regulators acquired so far (indices 0..i-1) */
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;
err:
	/* Drop the table again if we were the ones who created it */
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
1555 1556

/**
1557 1558
 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
1559
 *
1560
 * Locking: The internal opp_table and opp structures are RCU protected.
1561 1562 1563 1564 1565
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
1566
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
1567
{
1568 1569
	int i;

1570
	mutex_lock(&opp_table_lock);
1571

1572 1573
	if (!opp_table->regulators) {
		pr_err("%s: Doesn't have regulators set\n", __func__);
1574 1575 1576
		goto unlock;
	}

1577 1578
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));
1579

1580 1581 1582
	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

1583 1584
	_free_set_opp_data(opp_table);

1585 1586 1587
	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;
1588

1589 1590
	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);
1591 1592

unlock:
1593
	mutex_unlock(&opp_table_lock);
1594
}
1595
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
1596

1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699
/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful to support complex platforms (like platforms with multiple
 * regulators per device), instead of the generic OPP set rate helper.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_register_set_opp_helper(struct device *dev,
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *table;
	int ret;

	if (!set_opp)
		return -EINVAL;

	mutex_lock(&opp_table_lock);

	table = _add_opp_table(dev);
	if (!table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* The helper must be installed before any OPPs exist */
	if (WARN_ON(!list_empty(&table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Refuse to silently replace a previously registered helper */
	if (WARN_ON(table->set_opp)) {
		ret = -EBUSY;
		goto err;
	}

	table->set_opp = set_opp;

	mutex_unlock(&opp_table_lock);
	return 0;

err:
	/* Drop the table again if we were the ones who created it */
	_remove_opp_table(table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);

/**
 * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
 *					   set_opp helper
 * @dev: Device for which custom set_opp helper has to be cleared.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_register_put_opp_helper(struct device *dev)
{
	struct opp_table *table;

	mutex_lock(&opp_table_lock);

	table = _find_opp_table(dev);
	if (IS_ERR(table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(table));
		goto unlock;
	}

	if (!table->set_opp) {
		dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
			__func__);
		goto unlock;
	}

	/* No OPPs may exist while the helper is being torn down */
	WARN_ON(!list_empty(&table->opp_list));

	table->set_opp = NULL;

	/* Free the table itself if this was the last pinned resource */
	_remove_opp_table(table);

unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);

1700 1701 1702 1703 1704 1705
/**
 * dev_pm_opp_add()  - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
1706
 * This function adds an opp definition to the opp table and returns status.
1707 1708 1709
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
1710
 * Locking: The internal opp_table and opp structures are RCU protected.
1711 1712 1713 1714 1715 1716
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
1717
 * 0		On success OR
1718
 *		Duplicate OPPs (both freq and volt are same) and opp->available
1719
 * -EEXIST	Freq are same and volt are different OR
1720
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
1721
 * -ENOMEM	Memory allocation failure
1722 1723 1724
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
1725
	return _opp_add_v1(dev, freq, u_volt, true);
1726
}
1727
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1728 1729

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		/* No matching frequency: opp still holds ERR_PTR(-ENODEV) */
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/* Atomically swap old node for new; readers see one or the other */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	/* Free the replaced node only after all SRCU readers are done */
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	/* Error or no-op: the preallocated node was never used */
	kfree(new_opp);
	return r;
}

/**
1812
 * dev_pm_opp_enable() - Enable a specific OPP
1813 1814 1815 1816 1817
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
1818
 * after being temporarily made unavailable with dev_pm_opp_disable.
1819
 *
1820
 * Locking: The internal opp_table and opp structures are RCU protected.
1821 1822 1823 1824
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1825 1826
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1827
 * copy operation, returns 0 if no modification was done OR modification was
1828
 * successful.
1829
 */
1830
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1831
{
1832
	return _opp_set_availability(dev, freq, true);
1833
}
1834
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1835 1836

/**
1837
 * dev_pm_opp_disable() - Disable a specific OPP
1838 1839 1840 1841 1842 1843
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
1844
 * right to make it available again (with a call to dev_pm_opp_enable).
1845
 *
1846
 * Locking: The internal opp_table and opp structures are RCU protected.
1847 1848 1849 1850
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1851 1852
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1853
 * copy operation, returns 0 if no modification was done OR modification was
1854
 * successful.
1855
 */
1856
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1857
{
1858
	return _opp_set_availability(dev, freq, false);
1859
}
1860
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1861

1862
/**
1863
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1864
 * @dev:	device pointer used to lookup OPP table.
1865 1866 1867 1868 1869
 *
 * Return: pointer to  notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error casted as pointer. value must be checked
 *  with IS_ERR to determine valid pointer or error result.
 *
1870 1871 1872
 * Locking: This function must be called under rcu_read_lock(). opp_table is a
 * RCU protected pointer. The reason for the same is that the opp pointer which
 * is returned will remain valid for use with opp_get_{voltage, freq} only while
1873 1874
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
1875
 */
1876
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1877
{
1878
	struct opp_table *opp_table = _find_opp_table(dev);
1879

1880 1881
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table); /* matching type */
1882

1883
	return &opp_table->srcu_head;
1884
}
1885
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1886

1887 1888 1889
/*
 * Free OPPs either created using static entries present in DT or even the
 * dynamically added entries based on remove_all param.
 */
void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		/* -ENODEV (no table at all) is a legitimate, silent no-op */
		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			/* Dynamic OPPs are only dropped when remove_all set */
			if (remove_all || !opp->dynamic)
				_opp_remove(opp_table, opp);
		}
	} else {
		/* Table is shared: only detach this device, keep the OPPs */
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}

unlock:
	mutex_unlock(&opp_table_lock);
}
1926 1927

/**
1928
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
1929
 * @dev:	device pointer used to lookup OPP table.
1930
 *
1931 1932
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
1933
 *
1934
 * Locking: The internal opp_table and opp structures are RCU protected.
1935 1936 1937 1938
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
1939
 */
1940
void dev_pm_opp_remove_table(struct device *dev)
1941
{
1942
	_dev_pm_opp_remove_table(dev, true);
1943
}
1944
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);