// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/***    private data structures    ***/

struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct device_node	*of_node;
	struct clk_core		*parent;
	struct clk_parent_map	*parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

108 109 110
/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
111
	int ret;
112

113
	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
122
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

128 129 130
/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
142 143 144 145
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
152 153 154 155
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
156
	__acquires(enable_lock)
157 158
{
	unsigned long flags;
159

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
167 168
		if (enable_owner == current) {
			enable_refcnt++;
169
			__acquire(enable_lock);
170 171
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
180 181 182 183
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
184
	__releases(enable_lock)
185
{
186 187 188
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

189 190
	if (--enable_refcnt) {
		__release(enable_lock);
191
		return;
192
	}
193
	enable_owner = NULL;
194 195 196
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

202 203
static bool clk_core_is_prepared(struct clk_core *core)
{
204 205
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;
212

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
219
}
220

221 222
static bool clk_core_is_enabled(struct clk_core *core)
{
223 224
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
242
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
252
	if (core->rpm_enabled)
253
		pm_runtime_put(core->dev);
254 255

	return ret;
256
}

258
/***    helper functions   ***/
259

260
const char *__clk_get_name(const struct clk *clk)
261
{
262
	return !clk ? NULL : clk->core->name;
263
}
264
EXPORT_SYMBOL_GPL(__clk_get_name);
265

266
const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);
277

278
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

284
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

290 291
static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
292
{
293
	struct clk_core *child;
294
	struct clk_core *ret;
295

296 297
	if (!strcmp(core->name, name))
		return core;
298

299 300 301 302
	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
303 304
	}

305
	return NULL;
306 307
}

308
static struct clk_core *clk_core_lookup(const char *name)
309
{
310 311
	struct clk_core *root_clk;
	struct clk_core *ret;
312

313 314
	if (!name)
		return NULL;
315

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
321 322
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}
329

330
	return NULL;
331 332
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

352
/**
353
 * clk_core_get - Find the clk_core parent of a clk
354
 * @core: clk to find parent of
355
 * @p_index: parent index to search for
356 357 358 359
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
360 361
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *      parent: clock-controller@f00abcd {
 *              reg = <0xf00abcd 0xabcd>;
 *              #clock-cells = <0>;
 *      };
 *
 *      clock-controller@c001 {
 *              reg = <0xc001 0xf00d>;
 *              clocks = <&parent>;
 *              clock-names = "xtal";
 *              #clock-cells = <1>;
 *      };
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
383 384 385
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
386 387
 * A valid clk_core pointer when the clk can be found in the provider.
 */
388
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
389
{
390 391
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
392 393 394
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
395
	struct device_node *np = core->of_node;
396
	struct of_phandle_args clkspec;
397

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
407
		hw = clk_find_hw(dev_id, name);
408
	}
409 410

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}
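
/*
 * Illustrative sketch, not part of the original file: the provider side of
 * the DT example above would describe the external parent via
 * clk_init_data::parent_data rather than a globally unique name, e.g.:
 *
 *	static const struct clk_parent_data example_parents[] = {
 *		{ .fw_name = "xtal", .index = 0 },
 *	};
 *
 * clk_core_get() then resolves "xtal" through the node's 'clock-names'
 * property, or falls back to a clkdev lookup, as described in the comment
 * above. The array and its name here are hypothetical.
 */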

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
431
		parent = clk_core_get(core, index);
432
		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

441 442
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
							 u8 index)
443
{
444
	if (!core || index >= core->num_parents || !core->parents)
445
		return NULL;
446

447 448
	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);
449

450
	return core->parents[index].core;
451 452
}

453 454
struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

464 465 466 467
unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}
468

469 470
static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
471 472
	if (!core)
		return 0;
473

474 475
	if (!core->num_parents || core->parent)
		return core->rate;
476

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
483 484
}

485
unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

491 492 493 494
static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;
495

496
	return core->accuracy;
497 498
}

499
unsigned long __clk_get_flags(struct clk *clk)
{
501
	return !clk ? 0 : clk->core->flags;
}
503
EXPORT_SYMBOL_GPL(__clk_get_flags);

505
unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

511
bool clk_hw_is_prepared(const struct clk_hw *hw)
512 513 514
{
	return clk_core_is_prepared(hw->core);
}
515
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
516

517 518 519 520
bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
521
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
522

523 524 525 526
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
527
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
528

529
bool __clk_is_enabled(struct clk *clk)
530
{
531 532
	if (!clk)
		return false;
533

534 535 536
	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);
537

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			   unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);
543

544 545
	return now <= rate && now > best;
}
546

547 548 549
int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
550 551
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
552 553 554
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;
555

556 557 558
	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
567
			best = clk_core_get_rate_nolock(parent);
568
		} else {
569
			best = clk_core_get_rate_nolock(core);
570 571
		}

572 573
		goto out;
	}
574

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
593
			best_parent = parent;
594
			best = parent_req.rate;
595 596
		}
	}
597

598 599 600
	if (!best_parent)
		return -EINVAL;

601 602
out:
	if (best_parent)
603 604 605
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;
606

607
	return 0;
608
}
609
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
610 611

struct clk *__clk_lookup(const char *name)
{
613 614 615
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}
617

618 619 620
static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
621
{
622
	struct clk *clk_user;
623

624 625
	lockdep_assert_held(&prepare_lock);

626 627
	*min_rate = core->min_rate;
	*max_rate = core->max_rate;
628

629 630
	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);
631

632 633 634
	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}
635

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
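
/*
 * Illustrative sketch, not part of the original file: a provider that knows
 * its PLL only locks within a fixed window could clamp the boundaries right
 * after registration. The function name and the 600 MHz - 1.6 GHz window are
 * made-up examples.
 */
static inline void clk_example_limit_pll(struct clk_hw *hw)
{
	clk_hw_set_rate_range(hw, 600000000UL, 1600000000UL);
}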

/*
645 646 647 648
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
649 650 651
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
652 653
 *
 * Returns: 0 on success, a negative error code on failure
654
 */
655 656
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
657
{
658
	return clk_mux_determine_rate_flags(hw, req, 0);
659
}
660
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
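
/*
 * Illustrative sketch, not part of the original file: a mux provider can
 * plug this helper straight into its clk_ops. The set_parent/get_parent
 * callbacks named here are hypothetical:
 *
 *	static const struct clk_ops example_mux_ops = {
 *		.determine_rate	= __clk_mux_determine_rate,
 *		.set_parent	= example_mux_set_parent,
 *		.get_parent	= example_mux_get_parent,
 *	};
 */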

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
664
{
665
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
666 667
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
668

669
/***        clk api        ***/
670

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

678 679
	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
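
/*
 * Illustrative sketch, not part of the original file: a consumer that must
 * program its hardware against a rate that may not change underneath it.
 * The function name and the 148.5 MHz figure are made-up examples.
 */
static inline int clk_example_set_fixed_pixclk(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	/* The exclusive holder may still change the rate itself */
	ret = clk_set_rate(clk, 148500000);

	/* ... program the IP against the now-stable rate ... */

	clk_rate_exclusive_put(clk);

	return ret;
}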

static void clk_core_unprepare(struct clk_core *core)
{
807 808
	lockdep_assert_held(&prepare_lock);

809 810
	if (!core)
		return;
811

812 813
	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
814
		return;
815

816 817
	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
818 819
		return;

820 821 822
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

823 824
	if (--core->prepare_count > 0)
		return;
825

826
	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
827

828
	trace_clk_unprepare(core);
829

830 831 832
	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

833 834
	clk_pm_runtime_put(core);

835 836
	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
837 838
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
858
{
859 860 861
	if (IS_ERR_OR_NULL(clk))
		return;

862
	clk_core_unprepare_lock(clk->core);
863
}
864
EXPORT_SYMBOL_GPL(clk_unprepare);
865

866
static int clk_core_prepare(struct clk_core *core)
867
{
868
	int ret = 0;
869

870 871
	lockdep_assert_held(&prepare_lock);

872
	if (!core)
873 874
		return 0;

875
	if (core->prepare_count == 0) {
876
		ret = clk_pm_runtime_get(core);
877 878
		if (ret)
			return ret;
879

880 881 882 883
		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

884
		trace_clk_prepare(core);
885

886 887
		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);
888

889
		trace_clk_prepare_complete(core);
890

891 892
		if (ret)
			goto unprepare;
893
	}
894

895
	core->prepare_count++;
896

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

907
	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
913 914
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
939
{
940 941
	if (!clk)
		return 0;
942

943
	return clk_core_prepare_lock(clk->core);
944
}
945
EXPORT_SYMBOL_GPL(clk_prepare);
946

947
static void clk_core_disable(struct clk_core *core)
948
{
949 950
	lockdep_assert_held(&enable_lock);

951 952
	if (!core)
		return;
953

954
	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
955
		return;
956

957 958
	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
959 960
		return;

961 962
	if (--core->enable_count > 0)
		return;
963

964
	trace_clk_disable_rcuidle(core);
965

966 967
	if (core->ops->disable)
		core->ops->disable(core->hw);
968

969
	trace_clk_disable_complete_rcuidle(core);
970

971
	clk_core_disable(core->parent);
972
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
996
{
997 998 999
	if (IS_ERR_OR_NULL(clk))
		return;

1000
	clk_core_disable_lock(clk->core);
1001
}
1002
EXPORT_SYMBOL_GPL(clk_disable);
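
/*
 * Illustrative sketch, not part of the original file: tearing a clock down
 * mirrors the bring-up order - the atomic half (clk_disable) first, the
 * sleepable half (clk_unprepare) second. The function name is hypothetical.
 */
static inline void clk_example_power_off(struct clk *clk)
{
	clk_disable(clk);	/* fast, callable from atomic context */
	clk_unprepare(clk);	/* may sleep, so do it last */
}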

static int clk_core_enable(struct clk_core *core)
1005
{
1006
	int ret = 0;
1007

1008 1009
	lockdep_assert_held(&enable_lock);

1010 1011
	if (!core)
		return 0;
1012

1013 1014
	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
1015
		return -ESHUTDOWN;
1016

1017 1018
	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);
1019

1020 1021
		if (ret)
			return ret;
1022

1023
		trace_clk_enable_rcuidle(core);
1024

1025 1026
		if (core->ops->enable)
			ret = core->ops->enable(core->hw);
1027

1028
		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
1038
}
1039

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
1064 1065 1066 1067
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
1068
	else
1069
		core->ops->disable(hw);
1070 1071 1072
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

1073
static int clk_core_save_context(struct clk_core *core)
1074 1075 1076 1077
{
	struct clk_core *child;
	int ret = 0;

1078 1079
	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
1080 1081 1082 1083
		if (ret < 0)
			return ret;
	}

1084 1085
	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);
1086 1087 1088 1089

	return ret;
}

1090
static void clk_core_restore_context(struct clk_core *core)
1091 1092 1093
{
	struct clk_core *child;

1094 1095
	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);
1096

1097 1098
	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code.  Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
1114
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1120
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 *
 */
void clk_restore_context(void)
{
1137
	struct clk_core *core;
1138

1139 1140
	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);
1141

1142 1143
	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
1144 1145 1146
}
EXPORT_SYMBOL_GPL(clk_restore_context);
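
/*
 * Illustrative sketch, not part of the original file: a platform's noirq
 * suspend/resume hooks could bracket a power-losing sleep state with these
 * helpers. The function names are hypothetical.
 */
static inline int clk_example_suspend_noirq(struct device *dev)
{
	return clk_save_context();
}

static inline int clk_example_resume_noirq(struct device *dev)
{
	clk_restore_context();
	return 0;
}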

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)
1161
{
1162
	if (!clk)
1163 1164
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
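
/*
 * Illustrative sketch, not part of the original file: the usual consumer
 * pairing of the sleepable and atomic halves when bringing a clock up.
 * The function name is hypothetical.
 */
static inline int clk_example_power_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* safe in atomic context */
	if (ret)
		clk_unprepare(clk);	/* undo on failure */

	return ret;
}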

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);
1180

1181
	return ret;
1182
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}
1189

1190
static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

1205 1206 1207
	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}
1216 1217

	clk_pm_runtime_put(core);
1218 1219
}

1220
static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

1230 1231 1232
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

1233 1234 1235
	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
1260 1261
	clk_pm_runtime_put(core);
unprepare_out:
1262 1263
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
1264 1265
}

1266
static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
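
/*
 * Note for readers: passing "clk_ignore_unused" on the kernel command line
 * sets the flag above, which makes the late initcall below leave
 * bootloader-enabled clocks untouched instead of gating them.
 */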

static int __init clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

1303 1304
static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
1305
{
1306
	long rate;
1307 1308

	lockdep_assert_held(&prepare_lock);
1309

1310
	if (!core)
1311
		return 0;
1312

	/*
	 * At this point, core protection will be disabled if
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
1319 1320 1321
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
1322 1323 1324 1325 1326 1327 1328 1329 1330
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
1331
		return -EINVAL;
1332 1333 1334
	}

	return 0;
1335 1336
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
1352
	}
1353
}
1354

1355 1356
static bool clk_core_can_round(struct clk_core * const core)
{
1357
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

1365 1366
	if (!core) {
		req->rate = 0;
1367
		return 0;
1368
	}
1369

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
1378
	return 0;
1379 1380
}

1381 1382 1383
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
1384
 * @req: target rate request
1385
 *
1386
 * Useful for clk_ops such as .set_rate and .determine_rate.
1387
 */
1388
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1389
{
1390 1391
	if (!hw) {
		req->rate = 0;
1392
		return 0;
1393
	}
1394

1395
	return clk_core_round_rate_nolock(hw->core, req);
1396
}
1397
EXPORT_SYMBOL_GPL(__clk_determine_rate);
1398

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
1425
{
	struct clk_rate_request req;
	int ret;
1428

1429
	if (!clk)
1430
		return 0;
1431

1432
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

1445 1446
	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
1451
}
1452
EXPORT_SYMBOL_GPL(clk_round_rate);
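
/*
 * Illustrative sketch, not part of the original file: consumers typically
 * ask what the clk can really do before committing to a rate. The function
 * name and the 48 MHz target are made-up examples.
 */
static inline int clk_example_request_48mhz(struct clk *clk)
{
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded < 0)
		return rounded;

	return clk_set_rate(clk, rounded);
}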

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
1470
{
1471 1472 1473
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;
1474

1475 1476
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;
1477

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
1483 1484
			if (ret & NOTIFY_STOP_MASK)
				return ret;
1485
		}
1486 1487
	}

1488
	return ret;
1489 1490
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
1497
 * callback then it is assumed that the clock will take on the accuracy of its
1498 1499 1500
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
1501
{
1502 1503
	unsigned long parent_accuracy = 0;
	struct clk_core *child;
1504

1505
	lockdep_assert_held(&prepare_lock);
1506

1507 1508
	if (core->parent)
		parent_accuracy = core->parent->accuracy;
1509

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							  parent_accuracy);
	else
		core->accuracy = parent_accuracy;
1515

1516 1517
	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
1518 1519
}

1520
static long clk_core_get_accuracy(struct clk_core *core)
1521
{
1522
	unsigned long accuracy;
1523

1524 1525 1526
	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);
1527

1528 1529
	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();
1530

1531
	return accuracy;
1532
}
1533

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
1544
{
1545 1546
	if (!clk)
		return 0;
1547

1548
	return clk_core_get_accuracy(clk->core);
1549
}
1550
EXPORT_SYMBOL_GPL(clk_get_accuracy);
1551

1552 1553
static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
1554
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
1562 1563
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
1575
 */
1576
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1577
{
1578 1579 1580
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;
1581

1582
	lockdep_assert_held(&prepare_lock);
1583

1584
	old_rate = core->rate;
1585

1586 1587
	if (core->parent)
		parent_rate = core->parent->rate;
1588

1589
	core->rate = clk_recalc(core, parent_rate);
1590

1591 1592 1593 1594 1595 1596
	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);
1597

1598 1599 1600
	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}
1601

1602 1603 1604
static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;
1605

1606
	clk_prepare_lock();
1607

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
1615 1616 1617
}

/**
1618 1619
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
1620
 *
1621 1622 1623
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
1624
 */
1625
unsigned long clk_get_rate(struct clk *clk)
1626
{
1627 1628
	if (!clk)
		return 0;
1629

1630
	return clk_core_get_rate(clk->core);
1631
}
1632
EXPORT_SYMBOL_GPL(clk_get_rate);
1633

1634 1635
static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
1636
{
1637
	int i;
1638

1639 1640 1641
	if (!parent)
		return -EINVAL;

1642
	for (i = 0; i < core->num_parents; i++) {
1643
		/* Found it first try! */
1644
		if (core->parents[i].core == parent)
1645
			return i;
1646

1647
		/* Something else is here, so keep looking */
1648
		if (core->parents[i].core)
1649 1650
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
1658
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
1665 1666
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
1667
			break;
1668 1669
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
1675 1676
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
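
/*
 * Illustrative sketch, not part of the original file: a mux provider's
 * save_context() callback might stash the active input with this helper.
 * The mux structure and field are hypothetical:
 *
 *	mux->saved_parent_index = clk_hw_get_parent_index(hw);
 */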

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

1708
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1709
{
1710 1711
	bool was_orphan = core->orphan;

1712
	hlist_del(&core->child_node);
1713

1714
	if (new_parent) {
1715 1716
		bool becomes_orphan = new_parent->orphan;

1717 1718 1719
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;
1720

1721
		hlist_add_head(&core->child_node, &new_parent->children);
1722 1723 1724

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
1725 1726
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
1727 1728
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
1729
	}
1730

1731
	core->parent = new_parent;
1732 1733
}

1734 1735
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
					   struct clk_core *parent)
1736 1737
{
	unsigned long flags;
1738
	struct clk_core *old_parent = core->parent;
1739

1740
	/*
1741 1742 1743
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
1767
	if (core->prepare_count) {
1768 1769
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
1770
	}
1771

1772
	/* update the clk tree topology */
1773
	flags = clk_enable_lock();
1774
	clk_reparent(core, parent);
1775
	clk_enable_unlock(flags);
1776 1777

	return old_parent;
1778 1779
}

1780 1781 1782
static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
1783
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
1797 1798
	}
}
1799

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;
1806

1807
	old_parent = __clk_set_parent_before(core, parent);
1808

1809
	trace_clk_set_parent(core, parent);
1810

1811 1812 1813
	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);
1814

1815
	trace_clk_set_parent_complete(core, parent);
1816

1817 1818 1819 1820
	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
1821
		__clk_set_parent_after(core, old_parent, parent);
1822

1823
		return ret;
1824 1825
	}

1826 1827
	__clk_set_parent_after(core, parent, old_parent);

1828 1829 1830 1831
	return 0;
}

/**
1832 1833 1834
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
1835
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
1844
 */
1845 1846
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
1847
{
1848 1849 1850
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;
1851

1852
	lockdep_assert_held(&prepare_lock);
1853

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}
1871

1872
out:
1873 1874 1875
	return ret;
}

1876 1877
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
1878
{
1879
	struct clk_core *child;
1880

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;
1888

1889 1890 1891 1892 1893
	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
1894

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
1917
	if (parent)
1918
		best_parent_rate = parent->rate;
1919

1920 1921 1922
	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
1923
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

1930 1931 1932
		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
1933 1934
		if (ret < 0)
			return NULL;
1935

1936 1937 1938
		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1939

1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}
1952

1953 1954 1955 1956 1957 1958 1959
	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}
1960

1961 1962 1963 1964 1965 1966 1967 1968 1969
	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}
1970

1971 1972 1973
	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);
1974

1975 1976
out:
	clk_calc_subtree(core, new_rate, parent, p_index);
1977

1978
	return top;
1979 1980
}

1981 1982 1983 1984
/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
1985
 */
1986 1987
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
1988
{
1989
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1990 1991
	int ret = NOTIFY_DONE;

1992 1993
	if (core->rate == core->new_rate)
		return NULL;
1994

1995 1996 1997 1998
	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
1999 2000
	}

2001 2002 2003 2004 2005 2006 2007 2008
	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}
2009

2010 2011 2012 2013 2014 2015
	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}
2016

2017
	return fail_clk;
2018 2019
}

2020 2021 2022 2023 2024
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
2025
{
2026 2027 2028 2029 2030 2031
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
2032
	struct clk_core *parent = NULL;
2033

2034
	old_rate = core->rate;
2035

2036 2037
	if (core->new_parent) {
		parent = core->new_parent;
2038
		best_parent_rate = core->new_parent->rate;
2039 2040
	} else if (core->parent) {
		parent = core->parent;
2041
		best_parent_rate = core->parent->rate;
2042
	}
2043

2044 2045 2046
	if (clk_pm_runtime_get(core))
		return;

2047 2048 2049 2050 2051 2052 2053 2054 2055
	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

2056 2057 2058
	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);
2059

2060 2061 2062 2063 2064 2065 2066 2067
		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}
2068

2069 2070 2071
		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}
2072

2073 2074 2075
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

2076
	trace_clk_set_rate(core, core->new_rate);
2077

2078 2079
	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2080

2081
	trace_clk_set_rate_complete(core, core->new_rate);
2082

2083
	core->rate = clk_recalc(core, best_parent_rate);
2084

2085 2086 2087 2088 2089 2090 2091 2092 2093
	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

2094 2095 2096
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

2097 2098
	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2099

2100 2101
	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);
2102

2103
	/*
2104 2105
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
2106
	 */
2107 2108 2109 2110 2111 2112
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}
2113

2114 2115 2116
	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);
2117 2118

	clk_pm_runtime_put(core);
2119 2120
}

2121 2122 2123
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						     unsigned long req_rate)
{
2124
	int ret, cnt;
2125 2126 2127 2128 2129 2130 2131
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

2132 2133 2134 2135 2136
	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

2137 2138 2139 2140 2141
	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

2142 2143 2144
	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

2145
	return ret ? 0 : req.rate;
2146 2147
}

2148 2149
static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
2150
{
2151
	struct clk_core *top, *fail_clk;
2152
	unsigned long rate;
2153
	int ret = 0;
2154

2155 2156
	if (!core)
		return 0;
2157

2158 2159
	rate = clk_core_req_round_rate_nolock(core, req_rate);

2160 2161 2162
	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;
2163

2164 2165 2166 2167
	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

2168
	/* calculate new rates and get the topmost changed clock */
2169
	top = clk_calc_new_rates(core, req_rate);
2170 2171 2172
	if (!top)
		return -EINVAL;

2173 2174 2175 2176
	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

2177 2178 2179 2180 2181 2182
	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2183 2184
		ret = -EBUSY;
		goto err;
2185 2186 2187 2188 2189 2190
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
2191 2192
err:
	clk_pm_runtime_put(core);
2193

2194
	return ret;
2195
}
2196 2197

/**
2198 2199 2200
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
2201
 *
2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
2217
 */
2218
int clk_set_rate(struct clk *clk, unsigned long rate)
2219
{
2220 2221
	int ret;

2222 2223 2224
	if (!clk)
		return 0;

2225 2226
	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();
2227

J
Jerome Brunet 已提交
2228 2229 2230
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

2231
	ret = clk_core_set_rate_nolock(clk->core, rate);
2232

J
Jerome Brunet 已提交
2233 2234 2235
	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

2236
	clk_prepare_unlock();
2237

2238
	return ret;
2239
}
2240
EXPORT_SYMBOL_GPL(clk_set_rate);
2241

J
Jerome Brunet 已提交
2242
/**
2243
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
J
Jerome Brunet 已提交
2244 2245 2246 2247 2248 2249 2250
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section
 *
 * This can be used initially to ensure that at least 1 consumer is
2251
 * satisfied when several consumers are competing for exclusivity over the
J
Jerome Brunet 已提交
2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is not here, on purpose
	 * This function is meant to be used instead of clk_rate_protect,
	 * so before the consumer code path protect the clock provider
	 */

	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);

2289 2290 2291 2292 2293 2294 2295 2296 2297
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2298
{
2299
	int ret = 0;
2300
	unsigned long old_min, old_max, rate;
2301

2302 2303
	if (!clk)
		return 0;
2304

2305 2306 2307 2308 2309
	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
2310
	}
2311

2312
	clk_prepare_lock();
2313

J
Jerome Brunet 已提交
2314 2315 2316
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348
	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in bit of trouble here, current rate is outside the
		 * the requested range. We are going try to request appropriate
		 * range boundary but there is a catch. It may fail for the
		 * usual reason (clock broken, clock protected, etc) but also
		 * because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */

		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
2349 2350
	}

J
Jerome Brunet 已提交
2351 2352 2353
	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

2354
	clk_prepare_unlock();
2355

2356
	return ret;
S
Stephen Boyd 已提交
2357
}
2358
EXPORT_SYMBOL_GPL(clk_set_rate_range);
S
Stephen Boyd 已提交
2359

2360 2361 2362 2363 2364 2365 2366 2367
/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
S
Stephen Boyd 已提交
2368
{
2369 2370 2371 2372
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
S
Stephen Boyd 已提交
2373
}
2374
EXPORT_SYMBOL_GPL(clk_set_min_rate);
S
Stephen Boyd 已提交
2375

2376 2377 2378 2379 2380 2381 2382 2383
/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
S
Stephen Boyd 已提交
2384
{
2385 2386
	if (!clk)
		return 0;
2387

2388
	return clk_set_rate_range(clk, clk->min_rate, rate);
2389
}
2390
EXPORT_SYMBOL_GPL(clk_set_max_rate);
2391

2392
/**
2393 2394
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
2395
 *
2396
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
2397
 */
2398
struct clk *clk_get_parent(struct clk *clk)
2399
{
2400
	struct clk *parent;
2401

S
Stephen Boyd 已提交
2402 2403 2404
	if (!clk)
		return NULL;

2405
	clk_prepare_lock();
S
Stephen Boyd 已提交
2406 2407
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2408
	clk_prepare_unlock();
2409

2410 2411 2412
	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
2413

2414 2415
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
2416
	u8 index = 0;
2417

2418
	if (core->num_parents > 1 && core->ops->get_parent)
2419
		index = core->ops->get_parent(core->hw);
2420

2421
	return clk_core_get_parent_by_index(core, index);
2422 2423
}

2424 2425
static void clk_core_reparent(struct clk_core *core,
				  struct clk_core *new_parent)
2426
{
2427 2428 2429
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
2430 2431
}

2432 2433 2434 2435 2436 2437 2438 2439
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

2440 2441 2442 2443 2444 2445 2446 2447 2448
/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
2449
 */
2450
bool clk_has_parent(struct clk *clk, struct clk *parent)
2451
{
2452
	struct clk_core *core, *parent_core;
2453
	int i;
2454

2455 2456 2457
	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;
2458

2459 2460
	core = clk->core;
	parent_core = parent->core;
2461

2462 2463 2464
	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;
2465

2466 2467 2468 2469 2470
	for (i = 0; i < core->num_parents; i++)
		if (!strcmp(core->parents[i].name, parent_core->name))
			return true;

	return false;
2471 2472
}
EXPORT_SYMBOL_GPL(clk_has_parent);
2473

2474 2475
static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
2476 2477 2478 2479 2480
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

2481 2482
	lockdep_assert_held(&prepare_lock);

2483 2484 2485 2486
	if (!core)
		return 0;

	if (core->parent == parent)
2487
		return 0;
2488

2489
	/* verify ops for multi-parent clks */
2490 2491
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;
2492

2493
	/* check that we are allowed to re-parent if the clock is in use */
2494 2495
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;
2496

2497 2498
	if (clk_core_rate_is_protected(core))
		return -EBUSY;
2499

2500
	/* try finding the new parent index */
2501
	if (parent) {
2502
		p_index = clk_fetch_parent_index(core, parent);
2503
		if (p_index < 0) {
2504
			pr_debug("%s: clk %s can not be parent of clk %s\n",
2505
					__func__, parent->name, core->name);
2506
			return p_index;
2507
		}
2508
		p_rate = parent->rate;
2509 2510
	}

2511 2512
	ret = clk_pm_runtime_get(core);
	if (ret)
2513
		return ret;
2514

2515 2516
	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);
2517

2518 2519
	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
2520
		goto runtime_put;
2521

2522 2523
	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);
2524

2525 2526 2527 2528 2529 2530
	/* propagate rate an accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
2531 2532
	}

2533 2534
runtime_put:
	clk_pm_runtime_put(core);
2535

2536 2537
	return ret;
}
2538

2539 2540 2541 2542 2543 2544
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
{
	return clk_core_set_parent_nolock(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_set_parent);

2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (Eg: the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
2564 2565
	int ret;

2566 2567 2568
	if (!clk)
		return 0;

2569
	clk_prepare_lock();
J
Jerome Brunet 已提交
2570 2571 2572 2573

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

2574 2575
	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);
J
Jerome Brunet 已提交
2576 2577 2578 2579

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

2580 2581 2582
	clk_prepare_unlock();

	return ret;
2583
}
2584
EXPORT_SYMBOL_GPL(clk_set_parent);
2585

2586 2587 2588 2589 2590 2591 2592 2593 2594
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

2595 2596 2597
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

2598 2599
	trace_clk_set_phase(core, degrees);

2600
	if (core->ops->set_phase) {
2601
		ret = core->ops->set_phase(core->hw, degrees);
2602 2603 2604
		if (!ret)
			core->phase = degrees;
	}
2605 2606 2607 2608 2609 2610

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}

2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629
/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example
 * phase locked-loop clock signal generators we may shift phase with
 * respect to feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
2630
 */
2631
int clk_set_phase(struct clk *clk, int degrees)
2632
{
2633
	int ret;
2634

2635 2636
	if (!clk)
		return 0;
2637

2638 2639 2640 2641
	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;
2642

2643
	clk_prepare_lock();
S
Stephen Boyd 已提交
2644

J
Jerome Brunet 已提交
2645 2646
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);
S
Stephen Boyd 已提交
2647

2648
	ret = clk_core_set_phase_nolock(clk->core, degrees);
S
Stephen Boyd 已提交
2649

J
Jerome Brunet 已提交
2650 2651
	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);
2652

2653
	clk_prepare_unlock();
2654

2655 2656 2657
	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
2658

2659 2660 2661
static int clk_core_get_phase(struct clk_core *core)
{
	int ret;
2662

2663
	clk_prepare_lock();
2664 2665 2666
	/* Always try to update cached phase if possible */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
2667 2668
	ret = core->phase;
	clk_prepare_unlock();
2669

2670
	return ret;
2671 2672
}

2673 2674 2675 2676 2677 2678 2679 2680
/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk)
2681
{
2682
	if (!clk)
2683 2684
		return 0;

2685 2686 2687
	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);
2688

J
Jerome Brunet 已提交
2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854
static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
	/* Assume a default value of 50% */
	core->duty.num = 1;
	core->duty.den = 2;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
	struct clk_duty *duty = &core->duty;
	int ret = 0;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);
	if (ret)
		goto reset;

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {
		ret = -EINVAL;
		goto reset;
	}

	return 0;

reset:
	clk_core_reset_duty_cycle_nolock(core);
	return ret;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	} else {
		clk_core_reset_duty_cycle_nolock(core);
	}

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
	if (!ret)
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	}

	return ret;
}

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Apply the duty cycle ratio if the ratio is valid and the clock can
 * perform this operation
 *
 * Returns (0) on success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
	int ret;
	struct clk_duty duty;

	if (!clk)
		return 0;

	/* sanity check the ratio */
	if (den == 0 || num > den)
		return -EINVAL;

	duty.num = num;
	duty.den = den;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);

static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
					  unsigned int scale)
{
	struct clk_duty *duty = &core->duty;
	int ret;

	clk_prepare_lock();

	ret = clk_core_update_duty_cycle_nolock(core);
	if (!ret)
		ret = mult_frac(scale, duty->num, duty->den);

	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio of a clock node multiplied by the provided
 * scaling factor, or negative errno on error.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
{
	if (!clk)
		return 0;

	return clk_core_get_scaled_duty_cycle(clk->core, scale);
}
EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);

2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;
2871

2872
	/* true if clk->core pointers match. Avoid dereferencing garbage */
2873 2874 2875
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;
2876

2877 2878 2879
	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
2880

2881
/***        debugfs support        ***/
2882

2883 2884
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
2885

2886 2887 2888 2889
static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
2890

2891 2892 2893 2894 2895 2896 2897
static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
2898
{
J
Jerome Brunet 已提交
2899
	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
2900 2901
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
2902 2903
		   c->enable_count, c->prepare_count, c->protect_count,
		   clk_core_get_rate(c), clk_core_get_accuracy(c),
J
Jerome Brunet 已提交
2904 2905
		   clk_core_get_phase(c),
		   clk_core_get_scaled_duty_cycle(c, 100000));
2906
}
2907

2908 2909 2910 2911
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;
2912

2913
	clk_summary_show_one(s, c, level);
2914

2915 2916
	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
2917
}
2918

2919
static int clk_summary_show(struct seq_file *s, void *data)
2920
{
2921 2922
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;
2923

J
Jerome Brunet 已提交
2924 2925 2926
	seq_puts(s, "                                 enable  prepare  protect                                duty\n");
	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
	seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2927

2928 2929
	clk_prepare_lock();

2930 2931 2932
	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);
2933

2934
	clk_prepare_unlock();
2935

2936
	return 0;
2937
}
2938
DEFINE_SHOW_ATTRIBUTE(clk_summary);
2939

2940 2941
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
2942 2943 2944
	unsigned long min_rate, max_rate;

	clk_core_get_boundaries(c, &min_rate, &max_rate);
2945

S
Stefan Wahren 已提交
2946
	/* This should be JSON format, i.e. elements separated with a comma */
2947 2948 2949
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2950
	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
S
Stefan Wahren 已提交
2951
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2952 2953
	seq_printf(s, "\"min_rate\": %lu,", min_rate);
	seq_printf(s, "\"max_rate\": %lu,", max_rate);
S
Stefan Wahren 已提交
2954
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2955
	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
J
Jerome Brunet 已提交
2956 2957
	seq_printf(s, "\"duty_cycle\": %u",
		   clk_core_get_scaled_duty_cycle(c, 100000));
2958 2959
}

2960
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2961
{
2962
	struct clk_core *child;
2963

2964
	clk_dump_one(s, c, level);
2965

2966
	hlist_for_each_entry(child, &c->children, child_node) {
2967
		seq_putc(s, ',');
2968
		clk_dump_subtree(s, child, level + 1);
2969 2970
	}

2971
	seq_putc(s, '}');
2972 2973
}

2974
static int clk_dump_show(struct seq_file *s, void *data)
T
Thierry Reding 已提交
2975
{
2976 2977 2978
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;
T
Thierry Reding 已提交
2979

2980
	seq_putc(s, '{');
2981
	clk_prepare_lock();
2982

2983 2984 2985
	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
2986
				seq_putc(s, ',');
2987 2988 2989 2990
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}
T
Thierry Reding 已提交
2991

2992
	clk_prepare_unlock();
T
Thierry Reding 已提交
2993

2994
	seq_puts(s, "}\n");
2995
	return 0;
T
Thierry Reding 已提交
2996
}
2997
DEFINE_SHOW_ATTRIBUTE(clk_dump);
2998

2999 3000 3001 3002
static const struct {
	unsigned long flag;
	const char *name;
} clk_flags[] = {
3003
#define ENTRY(f) { f, #f }
3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014
	ENTRY(CLK_SET_RATE_GATE),
	ENTRY(CLK_SET_PARENT_GATE),
	ENTRY(CLK_SET_RATE_PARENT),
	ENTRY(CLK_IGNORE_UNUSED),
	ENTRY(CLK_GET_RATE_NOCACHE),
	ENTRY(CLK_SET_RATE_NO_REPARENT),
	ENTRY(CLK_GET_ACCURACY_NOCACHE),
	ENTRY(CLK_RECALC_NEW_RATES),
	ENTRY(CLK_SET_RATE_UNGATE),
	ENTRY(CLK_IS_CRITICAL),
	ENTRY(CLK_OPS_PARENT_ENABLE),
J
Jerome Brunet 已提交
3015
	ENTRY(CLK_DUTY_CYCLE_PARENT),
3016 3017 3018
#undef ENTRY
};

3019
static int clk_flags_show(struct seq_file *s, void *data)
3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037
{
	struct clk_core *core = s->private;
	unsigned long flags = core->flags;
	unsigned int i;

	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
		if (flags & clk_flags[i].flag) {
			seq_printf(s, "%s\n", clk_flags[i].name);
			flags &= ~clk_flags[i].flag;
		}
	}
	if (flags) {
		/* Unknown flags */
		seq_printf(s, "0x%lx\n", flags);
	}

	return 0;
}
3038
DEFINE_SHOW_ATTRIBUTE(clk_flags);
3039

3040 3041
static void possible_parent_show(struct seq_file *s, struct clk_core *core,
				 unsigned int i, char terminator)
3042
{
3043
	struct clk_core *parent;
3044

3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058
	/*
	 * Go through the following options to fetch a parent's name.
	 *
	 * 1. Fetch the registered parent clock and use its name
	 * 2. Use the global (fallback) name if specified
	 * 3. Use the local fw_name if provided
	 * 4. Fetch parent clock's clock-output-name if DT index was set
	 *
	 * This may still fail in some cases, such as when the parent is
	 * specified directly via a struct clk_hw pointer, but it isn't
	 * registered (yet).
	 */
	parent = clk_core_get_parent_by_index(core, i);
	if (parent)
3059
		seq_puts(s, parent->name);
3060
	else if (core->parents[i].name)
3061
		seq_puts(s, core->parents[i].name);
3062 3063 3064
	else if (core->parents[i].fw_name)
		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
	else if (core->parents[i].index >= 0)
3065 3066 3067
		seq_puts(s,
			 of_clk_get_parent_name(core->of_node,
						core->parents[i].index));
3068 3069
	else
		seq_puts(s, "(missing)");
3070

3071 3072 3073
	seq_putc(s, terminator);
}

3074
static int possible_parents_show(struct seq_file *s, void *data)
3075 3076 3077 3078 3079
{
	struct clk_core *core = s->private;
	int i;

	for (i = 0; i < core->num_parents - 1; i++)
3080
		possible_parent_show(s, core, i, ' ');
3081

3082
	possible_parent_show(s, core, i, '\n');
3083 3084 3085

	return 0;
}
3086
DEFINE_SHOW_ATTRIBUTE(possible_parents);
3087

3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098
static int current_parent_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;

	if (core->parent)
		seq_printf(s, "%s\n", core->parent->name);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_parent);

J
Jerome Brunet 已提交
3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	struct clk_duty *duty = &core->duty;

	seq_printf(s, "%u/%u\n", duty->num, duty->den);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);

3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137
static int clk_min_rate_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long min_rate, max_rate;

	clk_prepare_lock();
	clk_core_get_boundaries(core, &min_rate, &max_rate);
	clk_prepare_unlock();
	seq_printf(s, "%lu\n", min_rate);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_min_rate);

static int clk_max_rate_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long min_rate, max_rate;

	clk_prepare_lock();
	clk_core_get_boundaries(core, &min_rate, &max_rate);
	clk_prepare_unlock();
	seq_printf(s, "%lu\n", max_rate);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_max_rate);

3138
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3139
{
3140
	struct dentry *root;
3141

3142 3143
	if (!core || !pdentry)
		return;
3144

3145 3146
	root = debugfs_create_dir(core->name, pdentry);
	core->dentry = root;
3147

3148
	debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
3149 3150
	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3151 3152 3153 3154 3155 3156 3157
	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
	debugfs_create_u32("clk_phase", 0444, root, &core->phase);
	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
J
Jerome Brunet 已提交
3158 3159
	debugfs_create_file("clk_duty_cycle", 0444, root, core,
			    &clk_duty_cycle_fops);
3160

3161 3162 3163 3164
	if (core->num_parents > 0)
		debugfs_create_file("clk_parent", 0444, root, core,
				    &current_parent_fops);

3165 3166 3167
	if (core->num_parents > 1)
		debugfs_create_file("clk_possible_parents", 0444, root, core,
				    &possible_parents_fops);
3168

3169 3170
	if (core->ops->debug_init)
		core->ops->debug_init(core->hw, core->dentry);
3171
}
3172 3173

/**
3174 3175
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
3176
 *
3177 3178
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk directory
3179
 * will be created lazily by clk_debug_init as part of a late_initcall.
3180
 */
3181
static void clk_debug_register(struct clk_core *core)
3182
{
3183 3184
	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);
3185
	if (inited)
3186
		clk_debug_create_one(core, rootdir);
3187
	mutex_unlock(&clk_debug_lock);
3188
}
3189

3190
 /**
3191 3192
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
3193
 *
3194 3195
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
3196
 * clk_debug_register in __clk_core_init.
3197
 */
3198
static void clk_debug_unregister(struct clk_core *core)
3199
{
3200 3201 3202 3203 3204 3205
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}
3206

3207
/**
3208
 * clk_debug_init - lazily populate the debugfs clk directory
3209
 *
3210 3211 3212 3213 3214
 * clks are often initialized very early during boot before memory can be
 * dynamically allocated and well before debugfs is setup. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup. It should only be called once at boot-time, all other clks
 * added dynamically will be done so with clk_debug_register.
3215 3216 3217 3218
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;
3219

3220
	rootdir = debugfs_create_dir("clk", NULL);
3221

3222 3223 3224 3225 3226 3227 3228 3229
	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
			    &clk_summary_fops);
	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
			    &clk_dump_fops);
	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
			    &clk_summary_fops);
	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
			    &clk_dump_fops);
3230

3231 3232 3233
	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);
3234

3235 3236
	inited = 1;
	mutex_unlock(&clk_debug_lock);
3237

3238 3239 3240 3241
	return 0;
}
late_initcall(clk_debug_init);
#else
3242
static inline void clk_debug_register(struct clk_core *core) { }
3243 3244
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
3245 3246
{
}
3247
static inline void clk_debug_unregister(struct clk_core *core)
M
Michael Turquette 已提交
3248 3249
{
}
3250
#endif
M
Michael Turquette 已提交
3251

3252
/**
3253
 * __clk_core_init - initialize the data structures in a struct clk_core
3254
 * @core:	clk_core being initialized
3255
 *
3256
 * Initializes the lists in struct clk_core, queries the hardware for the
3257 3258
 * parent and rate and sets them both.
 */
3259
static int __clk_core_init(struct clk_core *core)
3260
{
3261
	int ret;
3262
	struct clk_core *orphan;
3263
	struct hlist_node *tmp2;
3264
	unsigned long rate;
3265

3266
	if (!core)
3267
		return -EINVAL;
3268

3269
	clk_prepare_lock();
3270

3271 3272 3273 3274
	ret = clk_pm_runtime_get(core);
	if (ret)
		goto unlock;

3275
	/* check to see if a clock with this name is already registered */
3276
	if (clk_core_lookup(core->name)) {
3277
		pr_debug("%s: clk %s already initialized\n",
3278
				__func__, core->name);
3279
		ret = -EEXIST;
3280
		goto out;
3281
	}
3282

3283
	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3284 3285 3286
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
3287 3288
		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
		       __func__, core->name);
3289
		ret = -EINVAL;
3290 3291 3292
		goto out;
	}

3293
	if (core->ops->set_parent && !core->ops->get_parent) {
3294 3295
		pr_err("%s: %s must implement .get_parent & .set_parent\n",
		       __func__, core->name);
3296
		ret = -EINVAL;
3297 3298 3299
		goto out;
	}

3300 3301 3302 3303 3304 3305 3306
	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multi parents\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

3307 3308
	if (core->ops->set_rate_and_parent &&
			!(core->ops->set_parent && core->ops->set_rate)) {
3309
		pr_err("%s: %s must implement .set_parent & .set_rate\n",
3310
				__func__, core->name);
S
Stephen Boyd 已提交
3311 3312 3313 3314
		ret = -EINVAL;
		goto out;
	}

3315 3316 3317 3318
	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
3319 3320 3321 3322 3323 3324
	 * exists for weird hardware that must perform initialization magic for
	 * CCF to get an accurate view of clock for any other callbacks. It may
	 * also be used needs to perform dynamic allocations. Such allocation
	 * must be freed in the terminate() callback.
	 * This callback shall not be used to initialize the parameters state,
	 * such as rate, parent, etc ...
3325 3326 3327 3328
	 *
	 * If it exist, this callback should called before any other callback of
	 * the clock
	 */
3329 3330 3331 3332 3333
	if (core->ops->init) {
		ret = core->ops->init(core->hw);
		if (ret)
			goto out;
	}
3334

3335
	core->parent = __clk_init_parent(core);
3336 3337

	/*
3338 3339
	 * Populate core->parent if parent has already been clk_core_init'd. If
	 * parent has not yet been clk_core_init'd then place clk in the orphan
S
Stephen Boyd 已提交
3340
	 * list.  If clk doesn't have any parents then place it in the root
3341 3342 3343 3344 3345 3346
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
3347
	if (core->parent) {
3348 3349
		hlist_add_head(&core->child_node,
				&core->parent->children);
3350
		core->orphan = core->parent->orphan;
S
Stephen Boyd 已提交
3351
	} else if (!core->num_parents) {
3352
		hlist_add_head(&core->child_node, &clk_root_list);
3353 3354
		core->orphan = false;
	} else {
3355
		hlist_add_head(&core->child_node, &clk_orphan_list);
3356 3357
		core->orphan = true;
	}
3358

3359 3360 3361 3362 3363 3364 3365
	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
3366 3367 3368 3369 3370
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
3371
	else
3372
		core->accuracy = 0;
3373

3374 3375 3376 3377 3378
	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
3379 3380
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
3381
	else
3382
		core->phase = 0;
3383

J
Jerome Brunet 已提交
3384 3385 3386 3387 3388
	/*
	 * Set clk's duty cycle.
	 */
	clk_core_update_duty_cycle_nolock(core);

3389 3390 3391 3392 3393 3394
	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
3395 3396 3397 3398 3399
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
3400
	else
3401
		rate = 0;
3402
	core->rate = core->req_rate = rate;
3403

3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418
	/*
	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
	 * don't get accidentally disabled when walking the orphan tree and
	 * reparenting clocks
	 */
	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		clk_core_prepare(core);

		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

3419
	/*
3420 3421
	 * walk the list of orphan clocks and reparent any that newly finds a
	 * parent.
3422
	 */
3423
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3424
		struct clk_core *parent = __clk_init_parent(orphan);
3425

3426
		/*
3427 3428 3429 3430
		 * We need to use __clk_set_parent_before() and _after() to
		 * to properly migrate any prepare/enable count of the orphan
		 * clock. This is important for CLK_IS_CRITICAL clocks, which
		 * are enabled during init but might not have a parent yet.
3431 3432
		 */
		if (parent) {
3433
			/* update the clk tree topology */
3434 3435
			__clk_set_parent_before(orphan, parent);
			__clk_set_parent_after(orphan, parent, NULL);
3436 3437 3438
			__clk_recalc_accuracies(orphan);
			__clk_recalc_rates(orphan, 0);
		}
3439
	}
3440

3441
	kref_init(&core->ref);
3442
out:
3443 3444
	clk_pm_runtime_put(core);
unlock:
3445
	clk_prepare_unlock();
3446

3447
	if (!ret)
3448
		clk_debug_register(core);
3449

3450
	return ret;
3451 3452
}

3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483
/**
 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
 * @core: clk to add consumer to
 * @clk: consumer to link to a clk
 */
static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
{
	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &core->clks);
	clk_prepare_unlock();
}

/**
 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
 * @clk: consumer to unlink
 */
static void clk_core_unlink_consumer(struct clk *clk)
{
	lockdep_assert_held(&prepare_lock);
	hlist_del(&clk->clks_node);
}

/**
 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
 * @core: clk to allocate a consumer for
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * Returns: clk consumer left unlinked from the consumer list
 */
static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3484
			     const char *con_id)
3485 3486 3487
{
	struct clk *clk;

3488 3489 3490 3491
	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

3492
	clk->core = core;
3493
	clk->dev_id = dev_id;
L
Leonard Crestez 已提交
3494
	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3495 3496
	clk->max_rate = ULONG_MAX;

3497 3498
	return clk;
}
3499

3500 3501 3502 3503 3504 3505 3506 3507
/**
 * free_clk - Free a clk consumer
 * @clk: clk consumer to free
 *
 * Note, this assumes the clk has been unlinked from the clk_core consumer
 * list.
 */
static void free_clk(struct clk *clk)
3508
{
L
Leonard Crestez 已提交
3509
	kfree_const(clk->con_id);
3510 3511
	kfree(clk);
}
3512

3513 3514 3515
/**
 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
 * a clk_hw
3516
 * @dev: clk consumer device
3517 3518 3519 3520 3521 3522 3523 3524
 * @hw: clk_hw associated with the clk being consumed
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * This is the main function used to create a clk pointer for use by clk
 * consumers. It connects a consumer to the clk_core and clk_hw structures
 * used by the framework and clk provider respectively.
 */
3525
struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538
			      const char *dev_id, const char *con_id)
{
	struct clk *clk;
	struct clk_core *core;

	/* This is to allow this function to be chained to others */
	if (IS_ERR_OR_NULL(hw))
		return ERR_CAST(hw);

	core = hw->core;
	clk = alloc_clk(core, dev_id, con_id);
	if (IS_ERR(clk))
		return clk;
3539
	clk->dev = dev;
3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551

	if (!try_module_get(core->owner)) {
		free_clk(clk);
		return ERR_PTR(-ENOENT);
	}

	kref_get(&core->ref);
	clk_core_link_consumer(core, clk);

	return clk;
}

3552
static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3553
{
3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568
	const char *dst;

	if (!src) {
		if (must_exist)
			return -EINVAL;
		return 0;
	}

	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	return 0;
}

3569 3570
static int clk_core_populate_parent_map(struct clk_core *core,
					const struct clk_init_data *init)
3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592
{
	u8 num_parents = init->num_parents;
	const char * const *parent_names = init->parent_names;
	const struct clk_hw **parent_hws = init->parent_hws;
	const struct clk_parent_data *parent_data = init->parent_data;
	int i, ret = 0;
	struct clk_parent_map *parents, *parent;

	if (!num_parents)
		return 0;

	/*
	 * Avoid unnecessary string look-ups of clk_core's possible parents by
	 * having a cache of names/clk_hw pointers to clk_core pointers.
	 */
	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	core->parents = parents;
	if (!parents)
		return -ENOMEM;

	/* Copy everything over because it might be __initdata */
	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3593
		parent->index = -1;
3594 3595 3596 3597 3598 3599 3600 3601 3602
		if (parent_names) {
			/* throw a WARN if any entries are NULL */
			WARN(!parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);
			ret = clk_cpy_name(&parent->name, parent_names[i],
					   true);
		} else if (parent_data) {
			parent->hw = parent_data[i].hw;
3603
			parent->index = parent_data[i].index;
3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645
			ret = clk_cpy_name(&parent->fw_name,
					   parent_data[i].fw_name, false);
			if (!ret)
				ret = clk_cpy_name(&parent->name,
						   parent_data[i].name,
						   false);
		} else if (parent_hws) {
			parent->hw = parent_hws[i];
		} else {
			ret = -EINVAL;
			WARN(1, "Must specify parents if num_parents > 0\n");
		}

		if (ret) {
			do {
				kfree_const(parents[i].name);
				kfree_const(parents[i].fw_name);
			} while (--i >= 0);
			kfree(parents);

			return ret;
		}
	}

	return 0;
}

static void clk_core_free_parent_map(struct clk_core *core)
{
	int i = core->num_parents;

	if (!core->num_parents)
		return;

	while (--i >= 0) {
		kfree_const(core->parents[i].name);
		kfree_const(core->parents[i].fw_name);
	}

	kfree(core->parents);
}

3646 3647
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3648
{
3649
	int ret;
3650
	struct clk_core *core;
3651 3652 3653 3654 3655 3656 3657 3658
	const struct clk_init_data *init = hw->init;

	/*
	 * The init data is not supposed to be used outside of registration path.
	 * Set it to NULL so that provider drivers can't use it either and so that
	 * we catch use of hw->init early on in the core.
	 */
	hw->init = NULL;
3659

3660 3661
	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
3662 3663 3664
		ret = -ENOMEM;
		goto fail_out;
	}
3665

3666
	core->name = kstrdup_const(init->name, GFP_KERNEL);
3667
	if (!core->name) {
3668 3669 3670
		ret = -ENOMEM;
		goto fail_name;
	}
3671

3672
	if (WARN_ON(!init->ops)) {
3673 3674 3675
		ret = -EINVAL;
		goto fail_ops;
	}
3676
	core->ops = init->ops;
3677

3678
	if (dev && pm_runtime_enabled(dev))
3679 3680
		core->rpm_enabled = true;
	core->dev = dev;
3681
	core->of_node = np;
3682
	if (dev && dev->driver)
3683 3684
		core->owner = dev->driver->owner;
	core->hw = hw;
3685 3686
	core->flags = init->flags;
	core->num_parents = init->num_parents;
3687 3688
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
3689
	hw->core = core;
3690

3691
	ret = clk_core_populate_parent_map(core, init);
3692
	if (ret)
3693 3694
		goto fail_parents;

3695
	INIT_HLIST_HEAD(&core->clks);
3696

3697 3698 3699 3700 3701
	/*
	 * Don't call clk_hw_create_clk() here because that would pin the
	 * provider module to itself and prevent it from ever being removed.
	 */
	hw->clk = alloc_clk(core, NULL, NULL);
3702 3703
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
3704
		goto fail_create_clk;
3705 3706
	}

3707 3708
	clk_core_link_consumer(hw->core, hw->clk);

3709
	ret = __clk_core_init(core);
3710
	if (!ret)
3711
		return hw->clk;
3712

3713 3714 3715 3716 3717
	clk_prepare_lock();
	clk_core_unlink_consumer(hw->clk);
	clk_prepare_unlock();

	free_clk(hw->clk);
3718
	hw->clk = NULL;
3719

3720 3721
fail_create_clk:
	clk_core_free_parent_map(core);
3722
fail_parents:
3723
fail_ops:
3724
	kfree_const(core->name);
3725
fail_name:
3726
	kfree(core);
3727 3728
fail_out:
	return ERR_PTR(ret);
3729
}
3730

3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752
/**
 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
 * @dev: Device to get device node of
 *
 * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if dev doesn't have a device node, or NULL if neither
 * @dev or @dev->parent have a device node.
 */
static struct device_node *dev_or_parent_of_node(struct device *dev)
{
	struct device_node *np;

	if (!dev)
		return NULL;

	np = dev_of_node(dev);
	if (!np)
		np = dev_of_node(dev->parent);

	return np;
}

3753 3754 3755 3756 3757
/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
3758 3759 3760 3761
 * clk_register is the *deprecated* interface for populating the clock tree with
 * new clock nodes. Use clk_hw_register() instead.
 *
 * Returns: a pointer to the newly allocated struct clk which
3762 3763 3764 3765 3766 3767
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
			       hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
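
/*
 * Example usage (illustrative sketch only, not taken from this file): a
 * platform driver registering a single clk_hw with clk_hw_register(). All
 * "foo_*" identifiers and the ops callbacks are hypothetical placeholders.
 *
 *	static const struct clk_ops foo_gate_ops = {
 *		.enable		= foo_gate_enable,
 *		.disable	= foo_gate_disable,
 *	};
 *
 *	static struct clk_hw foo_hw;
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk_init_data init = { };
 *
 *		init.name = "foo_gate";
 *		init.ops = &foo_gate_ops;
 *		init.num_parents = 0;
 *		foo_hw.init = &init;
 *
 *		return clk_hw_register(&pdev->dev, &foo_hw);
 *	}
 */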

/**
 * of_clk_hw_register - register a clk_hw and return an error code
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * of_clk_hw_register() is the primary interface for populating the clock tree
 * with new clock nodes when a struct device is not available, but a struct
 * device_node is. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling of_clk_hw_register().
 */
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);

	lockdep_assert_held(&prepare_lock);

	clk_core_free_parent_map(core);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
						struct clk_core *target)
{
	int i;
	struct clk_core *child;

	for (i = 0; i < root->num_parents; i++)
		if (root->parents[i].core == target)
			root->parents[i].core = NULL;

	hlist_for_each_entry(child, &root->children, child_node)
		clk_core_evict_parent_cache_subtree(child, target);
}

/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
	struct hlist_head **lists;
	struct clk_core *root;

	lockdep_assert_held(&prepare_lock);

	for (lists = all_lists; *lists; lists++)
		hlist_for_each_entry(root, *lists, child_node)
			clk_core_evict_parent_cache_subtree(root, core);

}

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;
	const struct clk_ops *ops;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	ops = clk->core->ops;
	if (ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (ops->terminate)
		ops->terminate(clk->core->hw);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	clk_core_evict_parent_cache(clk->core);

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
					__func__, clk->core->name);

	kref_put(&clk->core->ref, __clk_release);
	free_clk(clk);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
 *
 * Clocks returned from this function are automatically clk_unregister()ed on
 * driver detach. See clk_register() for more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
 */
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
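
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * managed variant ties the registration to the device lifetime, so no
 * explicit clk_hw_unregister() is needed on driver detach. "foo_hw" is a
 * hypothetical clk_hw initialized as in the clk_hw_register() example
 * further above.
 *
 *	ret = devm_clk_hw_register(&pdev->dev, &foo_hw);
 *	if (ret)
 *		return ret;
 */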

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;
	return hw == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
				hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

/*
 * clkdev helpers
 */

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user must be balanced with calls to clk_rate_exclusive_put()
	 * from that same consumer.
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	free_clk(clk);
}

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would take the prepare_lock mutex recursively and deadlock.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
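
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * consumer watching rate changes on a clock it obtained via clk_get().
 * The "foo_*" names are hypothetical.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate change %lu -> %lu\n",
 *				 ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */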

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

4199
		clk->core->notifier_count--;
4200 4201 4202 4203

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
4204
			list_del(&cn->node);
4205 4206 4207 4208 4209 4210 4211
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				     void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 *
 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
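
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * provider exposing several clk_hws through the generic onecell helper so
 * consumers can reference them as <&provider index>. The "FOO_*" and
 * "foo_*" names are hypothetical.
 *
 *	struct clk_hw_onecell_data *hw_data;
 *
 *	hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, FOO_NR_CLKS),
 *			       GFP_KERNEL);
 *	if (!hw_data)
 *		return -ENOMEM;
 *
 *	hw_data->num = FOO_NR_CLKS;
 *	hw_data->hws[FOO_CLK_BUS] = &foo_bus_hw;
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
 */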

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

/*
 * We allow a child device to use its parent device as the clock provider node
 * for cases like MFD sub-devices where the child device driver wants to use
 * devm_*() APIs but not list the device in DT as a sub-node.
 */
static struct device_node *get_clk_provider_node(struct device *dev)
{
	struct device_node *np, *parent_np;

	np = dev->of_node;
	parent_np = dev->parent ? dev->parent->of_node : NULL;

	if (!of_find_property(np, "#clock-cells", NULL))
		if (of_find_property(parent_np, "#clock-cells", NULL))
			np = parent_np;

	return np;
}

/**
 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
 * @dev: Device acting as the clock provider (used for DT node and lifetime)
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 *
 * Registers a clock provider for the given device's node. If the device has no
 * DT node, or if its node lacks clock provider information (#clock-cells), then
 * the parent device's node is scanned for this information. If the parent node
 * has a #clock-cells property, it is used for the registration instead. The
 * provider is automatically released at device exit.
 *
 * Return: 0 on success or an errno on failure.
 */
int devm_of_clk_add_hw_provider(struct device *dev,
			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
					      void *data),
			void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = get_clk_provider_node(dev);
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
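
/*
 * Example usage (illustrative sketch, not part of the original file): same
 * idea as of_clk_add_hw_provider() above, but the provider is removed
 * automatically on driver detach and the DT node is taken from @dev (or
 * its parent, see get_clk_provider_node()). "hw_data" is the hypothetical
 * onecell data from the previous example.
 *
 *	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
 */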

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

/**
 * devm_of_clk_del_provider() - Remove clock provider registered using devm
 * @dev: Device to whose lifetime the clock provider was bound
 */
void devm_of_clk_del_provider(struct device *dev)
{
	int ret;
	struct device_node *np = get_clk_provider_node(dev);

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, np);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

/**
 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
 * @np: device node to parse clock specifier from
 * @index: index of phandle to parse clock out of. If index < 0, @name is used
 * @name: clock name to find and parse. If name is NULL, the index is used
 * @out_args: Result of parsing the clock specifier
 *
 * Parses a device node's "clocks" and "clock-names" properties to find the
 * phandle and cells for the index or name that is desired. The resulting clock
 * specifier is placed into @out_args, or an errno is returned when there's a
 * parsing error. The @index argument is ignored if @name is non-NULL.
 *
 * Example:
 *
 * phandle1: clock-controller@1 {
 *	#clock-cells = <2>;
 * }
 *
 * phandle2: clock-controller@2 {
 *	#clock-cells = <1>;
 * }
 *
 * clock-consumer@3 {
 *	clocks = <&phandle1 1 2 &phandle2 3>;
 *	clock-names = "name1", "name2";
 * }
 *
 * To get a device_node for `clock-controller@2' node you may call this
 * function a few different ways:
 *
 *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
 *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
 *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
 *
 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
 * the "clock-names" property of @np.
 */
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args)
{
	int ret = -ENOENT;

	/* Walk up the tree of devices looking for a clock property that matches */
	while (np) {
		/*
		 * For named clocks, first look up the name in the
		 * "clock-names" property.  If it cannot be found, then index
		 * will be an error code and of_parse_phandle_with_args() will
		 * return -EINVAL.
		 */
		if (name)
			index = of_property_match_string(np, "clock-names", name);
		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
						 index, out_args);
		if (!ret)
			break;
		if (name && index >= 0)
			break;

		/*
		 * No matching clock found on this node.  If the parent node
		 * has a "clock-ranges" property, then we can try one of its
		 * clocks.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "clock-ranges", NULL))
			break;
		index = 0;
	}

	return ret;
}

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			if (!IS_ERR(hw))
				break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return hw;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * by of_parse_phandle_with_args().
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);

	return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);

struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
			     const char *con_id)
{
	int ret;
	struct clk_hw *hw;
	struct of_phandle_args clkspec;

	ret = of_parse_clkspec(np, index, con_id, &clkspec);
	if (ret)
		return ERR_PTR(ret);

	hw = of_clk_get_hw_from_clkspec(&clkspec);
	of_node_put(clkspec.np);

	return hw;
}

static struct clk *__of_clk_get(struct device_node *np,
				int index, const char *dev_id,
				const char *con_id)
{
	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);

	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
}

struct clk *of_clk_get(struct device_node *np, int index)
{
	return __of_clk_get(np, index, np->full_name, NULL);
}
EXPORT_SYMBOL(of_clk_get);

/**
 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
 * @np: pointer to clock consumer node
 * @name: name of consumer's clock input, or NULL for the first clock reference
 *
 * This function parses the clocks and clock-names properties,
 * and uses them to look up the struct clk from the registered list of clock
 * providers.
 */
struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
{
	if (!np)
		return ERR_PTR(-ENOENT);

	return __of_clk_get(np, 0, np->full_name, name);
}
EXPORT_SYMBOL(of_clk_get_by_name);
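
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * consumer looking up one of its clocks by the name listed in its
 * "clock-names" property and then preparing/enabling it. "baud" is a
 * hypothetical clock-names entry.
 *
 *	struct clk *clk;
 *
 *	clk = of_clk_get_by_name(np, "baud");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 */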

/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
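
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * provider collecting the names of its DT parents before registering a
 * mux-style clock. The array size of four is an arbitrary example value.
 *
 *	const char *parents[4];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 */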

struct clock_provider {
	void (*clk_init_cb)(struct device_node *);
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume the device tree is written correctly,
		 * so any other error means there is no more parent. As
		 * we did not exit earlier, the previous parents are all
		 * ready. If the clock has no parent at all, there is
		 * nothing to wait for and we can consider it ready.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style.
 * Those bindings typically put all clock data into .dts and the Linux
 * driver has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only those drivers may call
 * of_clk_detect_critical from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
					  int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}
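
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * legacy one-clock-per-node setup function propagating the flag into the
 * init data it registers with. The "foo_*" names are hypothetical.
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		unsigned long flags = 0;
 *
 *		of_clk_detect_critical(np, 0, &flags);
 *		... pass flags into the clk_init_data used for registration ...
 *	}
 */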

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions, following inter-provider
 * dependencies where possible.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
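
/*
 * Example (illustrative sketch, not part of the original file): providers
 * that want to be picked up by the of_clk_init() scan above are declared
 * with CLK_OF_DECLARE(), which places their match entry in __clk_of_table.
 * The "foo" names and compatible string are hypothetical.
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		... register clk_hws, then of_clk_add_hw_provider(np, ...) ...
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);
 */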
#endif