/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_node	debug_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	struct hlist_node clks_node;
};

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
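
/*
 * Illustrative sketch only (not part of the original file): both locks above
 * are reentrant for the owning task, so a clk_ops callback may re-enter the
 * clk API from the same context without deadlocking.
 */
static void __maybe_unused clk_prepare_lock_example(void)
{
	clk_prepare_lock();	/* first call takes the mutex */
	clk_prepare_lock();	/* same task: only bumps prepare_refcnt */
	clk_prepare_unlock();	/* drops the refcount back to 1 */
	clk_prepare_unlock();	/* releases the mutex */
}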

static bool clk_core_is_prepared(struct clk_core *core)
{
	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	return core->ops->is_prepared(core->hw);
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	return core->ops->is_enabled(core->hw);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
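
/*
 * Example (informative): booting with "clk_ignore_unused" on the kernel
 * command line makes the late initcall below a no-op, leaving unused clocks
 * running.  This can help while bringing up a platform whose clock consumers
 * are not all described yet.
 */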

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

/***    helper functions   ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->core->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);

struct clk *__clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;

	/* TODO: Create a per-user clk and change callers to call clk_put */
	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
							 u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;
	else if (!core->parents)
		return clk_core_lookup(core->parent_names[index]);
	else if (!core->parents[index])
		return core->parents[index] =
			clk_core_lookup(core->parent_names[index]);
	else
		return core->parents[index];
}

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	struct clk_core *parent;

	if (!clk)
		return NULL;

	parent = clk_core_get_parent_by_index(clk->core, index);

	return !parent ? NULL : parent->hw->clk;
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
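
/*
 * Illustrative sketch only (not part of the original file): dumping every
 * possible parent of a clk with the lookup helpers above.  The printed names
 * depend entirely on the platform's clock tree.
 */
static void __maybe_unused clk_print_possible_parents(struct clk *clk)
{
	u8 i;

	for (i = 0; i < __clk_get_num_parents(clk); i++) {
		struct clk *parent = clk_get_parent_by_index(clk, i);

		pr_info("%s: parent %u: %s\n", __clk_get_name(clk), i,
			parent ? __clk_get_name(parent) : "<not registered>");
	}
}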

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (core->flags & CLK_IS_ROOT)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate_nolock(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

bool __clk_is_prepared(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_prepared(clk->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			   unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}
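
/*
 * Example (informative): for a 48 MHz request with candidate parent rates of
 * 40 MHz and 50 MHz, the default policy above picks 40 MHz (the fastest rate
 * not above the request), while CLK_MUX_ROUND_CLOSEST picks 50 MHz (the
 * smallest absolute distance from the request).
 */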

static long
clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
			     unsigned long min_rate,
			     unsigned long max_rate,
			     unsigned long *best_parent_rate,
			     struct clk_hw **best_parent_p,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT)
			best = __clk_determine_rate(parent ? parent->hw : NULL,
						    rate, min_rate, max_rate);
		else if (parent)
			best = clk_core_get_rate_nolock(parent);
		else
			best = clk_core_get_rate_nolock(core);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;
		if (core->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_determine_rate(parent->hw, rate,
							   min_rate,
							   max_rate);
		else
			parent_rate = clk_core_get_rate_nolock(parent);
		if (mux_is_better_rate(rate, parent_rate, best, flags)) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent->hw;
	*best_parent_rate = best;

	return best;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = 0;
	*max_rate = ULONG_MAX;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long min_rate,
			      unsigned long max_rate,
			      unsigned long *best_parent_rate,
			      struct clk_hw **best_parent_p)
{
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
					    best_parent_p, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
			      unsigned long min_rate,
			      unsigned long max_rate,
			      unsigned long *best_parent_rate,
			      struct clk_hw **best_parent_p)
{
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
					    best_parent_p,
					    CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/

static void clk_core_unprepare(struct clk_core *core)
{
	if (!core)
		return;

	if (WARN_ON(core->prepare_count == 0))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  For this reason clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	clk_core_unprepare(clk->core);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_core_prepare(core->parent);
		if (ret)
			return ret;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret) {
			clk_core_unprepare(core->parent);
			return ret;
		}
	}

	core->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * For this reason clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_prepare(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	if (!core)
		return;

	if (WARN_ON(core->enable_count == 0))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete(core);

	clk_core_disable(core->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  For
 * this reason clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	flags = clk_enable_lock();
	clk_core_disable(clk->core);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	if (!core)
		return 0;

	if (WARN_ON(core->prepare_count == 0))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  For this reason
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return 0;

	flags = clk_enable_lock();
	ret = clk_core_enable(clk->core);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
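
/*
 * Illustrative sketch only (not part of the original file): the canonical
 * consumer pairing of the four calls above, for a clk obtained elsewhere via
 * clk_get().
 */
static int __maybe_unused clk_consumer_example(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* atomic, must not sleep */
	if (ret) {
		clk_unprepare(clk);
		return ret;
	}

	/* ... the device is now clocked ... */

	clk_disable(clk);		/* atomic */
	clk_unprepare(clk);		/* may sleep */
	return 0;
}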

static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
						unsigned long rate,
						unsigned long min_rate,
						unsigned long max_rate)
{
	unsigned long parent_rate = 0;
	struct clk_core *parent;
	struct clk_hw *parent_hw;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	parent = core->parent;
	if (parent)
		parent_rate = parent->rate;

	if (core->ops->determine_rate) {
		parent_hw = parent ? parent->hw : NULL;
		return core->ops->determine_rate(core->hw, rate,
						min_rate, max_rate,
						&parent_rate, &parent_hw);
	} else if (core->ops->round_rate)
		return core->ops->round_rate(core->hw, rate, &parent_rate);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, rate, min_rate,
						  max_rate);
	else
		return core->rate;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @rate: target rate
 * @min_rate: returned rate must be greater than this rate
 * @max_rate: returned rate must be less than this rate
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
unsigned long __clk_determine_rate(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long min_rate,
				   unsigned long max_rate)
{
	if (!hw)
		return 0;

	return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Useful for clk_ops such as .set_rate.
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long min_rate;
	unsigned long max_rate;

	if (!clk)
		return 0;

	clk_core_get_boundaries(clk->core, &min_rate, &max_rate);

	return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
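
/*
 * Example (informative, hypothetical clocks): a 150 MHz request against a
 * divide-by-N child of a fixed 400 MHz parent rounds down to 133.33 MHz
 * (N = 3), since the child cannot ask its parent to change rate unless
 * CLK_SET_RATE_PARENT is set.
 */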

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							  parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	if (core->ops->recalc_rate)
		return core->ops->recalc_rate(core->hw, parent_rate);
	return parent_rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!core->parents) {
		core->parents = kcalloc(core->num_parents,
					sizeof(struct clk *), GFP_KERNEL);
		if (!core->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to clk_core_lookup.
	 */
	for (i = 0; i < core->num_parents; i++) {
		if (core->parents[i] == parent)
			return i;

		if (core->parents[i])
			continue;

		if (!strcmp(core->parent_names[i], parent->name)) {
			core->parents[i] = clk_core_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}
1058
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1059
{
1060
	hlist_del(&core->child_node);
1061

1062 1063 1064 1065
	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;
1066

1067 1068 1069 1070
		hlist_add_head(&core->child_node, &new_parent->children);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
	}
1071

1072
	core->parent = new_parent;
1073 1074
}

1075 1076
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
					   struct clk_core *parent)
1077 1078
{
	unsigned long flags;
1079
	struct clk_core *old_parent = core->parent;
1080

1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102
	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (core->prepare_count) {
		clk_core_prepare(parent);
		clk_core_enable(parent);
		clk_core_enable(core);
	}
1103

1104
	/* update the clk tree topology */
1105
	flags = clk_enable_lock();
1106
	clk_reparent(core, parent);
1107
	clk_enable_unlock(flags);
1108 1109

	return old_parent;
1110 1111
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable(core);
		clk_core_disable(old_parent);
		clk_core_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);

		if (core->prepare_count) {
			clk_core_disable(core);
			clk_core_disable(parent);
			clk_core_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	struct clk_hw *parent_hw;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (core->ops->determine_rate) {
		parent_hw = parent ? parent->hw : NULL;
		ret = core->ops->determine_rate(core->hw, rate,
					       min_rate,
					       max_rate,
					       &best_parent_rate,
					       &parent_hw);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		parent = parent_hw ? parent_hw->core : NULL;
	} else if (core->ops->round_rate) {
		ret = core->ops->round_rate(core->hw, rate,
					   &best_parent_rate);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;

	old_rate = core->rate;

	if (core->new_parent)
		best_parent_rate = core->new_parent->rate;
	else if (core->parent)
		best_parent_rate = core->parent->rate;

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate = req_rate;
	int ret = 0;

	if (!core)
		return 0;

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, rate);
	if (!top)
		return -EINVAL;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		return -EBUSY;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	ret = clk_core_set_rate_nolock(clk->core, rate);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
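
/*
 * Illustrative sketch only (not part of the original file): a consumer can
 * round first and only commit if the result is acceptably close to the
 * target; the 1 MHz tolerance is an arbitrary example policy.
 */
static int __maybe_unused clk_set_rate_checked(struct clk *clk,
					       unsigned long target)
{
	long rounded = clk_round_rate(clk, target);

	if (rounded < 0)
		return rounded;

	if (abs(rounded - (long)target) > 1000000)
		return -ERANGE;

	return clk_set_rate(clk, rounded);
}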

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (min != clk->min_rate || max != clk->max_rate) {
		clk->min_rate = min;
		clk->max_rate = max;
		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
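
/*
 * Illustrative sketch only (not part of the original file): each struct clk
 * carries its own min/max, and clk_core_get_boundaries() above intersects the
 * ranges of all users.  The 100-400 MHz window here is hypothetical.
 */
static int __maybe_unused clk_limit_example(struct clk *clk)
{
	return clk_set_rate_range(clk, 100000000, 400000000);
}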

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree.
 */
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	struct clk_core *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!core->num_parents)
		goto out;

	if (core->num_parents == 1) {
		if (IS_ERR_OR_NULL(core->parent))
			core->parent = clk_core_lookup(core->parent_names[0]);
		ret = core->parent;
		goto out;
	}

	if (!core->ops->get_parent) {
		WARN(!core->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in core->parents.  This prevents
	 * unnecessary and expensive lookups.  We don't set core->parent here;
	 * that is done by the calling function.
	 */

	index = core->ops->get_parent(core->hw);

	if (!core->parents)
		core->parents =
			kcalloc(core->num_parents, sizeof(struct clk *),
					GFP_KERNEL);

	ret = clk_core_get_parent_by_index(core, index);

out:
	return ret;
}

static void clk_core_reparent(struct clk_core *core,
				  struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!core)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (core->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, core->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

out:
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk)
		return 0;

	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
}
EXPORT_SYMBOL_GPL(clk_set_parent);
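
/*
 * Illustrative sketch only (not part of the original file): validating a
 * candidate parent with clk_has_parent() before switching to it.
 */
static int __maybe_unused clk_reparent_example(struct clk *clk,
					       struct clk *new_parent)
{
	if (!clk_has_parent(clk, new_parent))
		return -EINVAL;

	return clk_set_parent(clk, new_parent);
}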

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, a negative errno otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example, with
 * phase-locked-loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	trace_clk_set_phase(clk->core, degrees);

	if (clk->core->ops->set_phase)
		ret = clk->core->ops->set_phase(clk->core->hw, degrees);

	trace_clk_set_phase_complete(clk->core, degrees);

	if (!ret)
		clk->core->phase = degrees;

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
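
/*
 * Example (informative): clk_set_phase(clk, -90) is normalized by the modulo
 * logic above to 270 degrees before .set_phase is called.
 */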

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
		   clk_core_get_accuracy(c), clk_core_get_phase(c));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}

static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_printf(s, "{");

	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_puts(s, ",");
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}

static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!core || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(core->name, pdentry);
	if (!d)
		goto out;

	core->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
			(u32 *)&core->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
			(u32 *)&core->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
			(u32 *)&core->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
			(u32 *)&core->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
			(u32 *)&core->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
			(u32 *)&core->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
			(u32 *)&core->notifier_count);
	if (!d)
		goto err_out;

	if (core->ops->debug_init) {
		ret = core->ops->debug_init(core->hw, core->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk directory
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
static int clk_debug_register(struct clk_core *core)
{
	int ret = 0;

	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);

	if (!inited)
		goto unlock;

	ret = clk_debug_create_one(core, rootdir);
unlock:
	mutex_unlock(&clk_debug_lock);

	return ret;
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}

struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

	if (hw->core->dentry)
		d = debugfs_create_file(name, mode, hw->core->dentry, data,
					fops);

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
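
/*
 * Editorial example (hedged sketch, not from the original file): how a clock
 * provider might hook ->debug_init to publish an extra debugfs file with
 * clk_debugfs_add_file().  "example_fops" and the file name are hypothetical
 * placeholders.
 */
static const struct file_operations example_fops;	/* hypothetical fops */

static int example_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	/* the core has already created hw->core->dentry at this point */
	if (!clk_debugfs_add_file(hw, "example_state", S_IRUGO, hw,
				  &example_fops))
		return -ENOMEM;

	return 0;
}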

/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot, before memory can be
 * dynamically allocated and well before debugfs is set up. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is set up. It should only be called once at boot-time; all other
 * clks added dynamically will be registered by clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
		return -ENOMEM;

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk_core *core) { return 0; }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:	device initializing this clk, placeholder for now
 * @clk:	clk being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_init(struct device *dev, struct clk *clk_user)
{
	int i, ret = 0;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	struct clk_core *core;
	unsigned long rate;

	if (!clk_user)
		return -EINVAL;

	core = clk_user->core;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
			!(core->ops->set_parent && core->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < core->num_parents; i++)
		WARN(!core->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to core->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If core->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize core->parents.
	 */
	if (core->num_parents > 1 && !core->parents) {
		core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
					GFP_KERNEL);
		/*
		 * clk_core_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (core->parents)
			for (i = 0; i < core->num_parents; i++)
				core->parents[i] =
					clk_core_lookup(core->parent_names[i]);
	}

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent)
		hlist_add_head(&core->child_node,
				&core->parent->children);
	else if (core->flags & CLK_IS_ROOT)
		hlist_add_head(&core->child_node, &clk_root_list);
	else
		hlist_add_head(&core->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(core->name, orphan->parent_names[i]))
				clk_core_reparent(orphan, core);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(core->name, orphan->parent_names[i])) {
				clk_core_reparent(orphan, core);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	kref_init(&core->ref);
out:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(core);

	return ret;
}

struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (!hw || IS_ERR(hw))
		return (struct clk *) hw;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = con_id;
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree(clk);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}
	core->ops = hw->init->ops;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	INIT_HLIST_HEAD(&core->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parent_names_copy;
	}

	ret = __clk_init(dev, hw->clk);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
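
/*
 * Editorial example (a minimal sketch, not part of the original file): a
 * typical clk_register() call site.  The ops structure, clock name and
 * parent name are hypothetical; a real driver usually embeds struct clk_hw
 * in its own state and fills it from probe().
 */
static const struct clk_ops example_clk_ops;	/* hypothetical ops */

static struct clk *example_register_clk(struct device *dev)
{
	static const char *example_parents[] = { "example-osc" };
	static struct clk_init_data example_init = {
		.name = "example-clk",
		.ops = &example_clk_ops,
		.parent_names = example_parents,
		.num_parents = ARRAY_SIZE(example_parents),
	};
	static struct clk_hw example_hw = {
		.init = &example_init,
	};

	/* on failure an ERR_PTR() is returned and must be checked */
	return clk_register(dev, &example_hw);
}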

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);
	int i = core->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(core->parents);
	while (--i >= 0)
		kfree_const(core->parent_names[i]);

	kfree(core->parent_names);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);

unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
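
/*
 * Editorial example (hedged sketch, not from the original file): managed
 * registration from probe().  "example_hw" is assumed to be a driver-owned,
 * fully initialized clk_hw; the clock is unregistered automatically when the
 * driver detaches.
 */
static int example_probe_clk(struct device *dev, struct clk_hw *example_hw)
{
	struct clk *clk = devm_clk_register(dev, example_hw);

	return PTR_ERR_OR_ZERO(clk);
}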

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would recursively take the prepare_lock mutex and deadlock.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
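
/*
 * Editorial example (a sketch under assumptions, not from the original file):
 * a rate-change notifier callback.  PRE/POST/ABORT_RATE_CHANGE and struct
 * clk_notifier_data come from linux/clk.h; everything named "example" is
 * hypothetical.
 */
static int example_clk_notify(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		pr_debug("%s: %lu -> %lu\n", __clk_get_name(ndata->clk),
			 ndata->old_rate, ndata->new_rate);
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_clk_nb = {
	.notifier_call = example_clk_notify,
};

/* registered from non-atomic context with:
 *	clk_notifier_register(clk, &example_clk_nb);
 */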

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free the memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				     void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
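
/*
 * Editorial example (a minimal sketch, assumptions marked): publishing an
 * array of clocks through the onecell helper so that "clocks = <&phandle n>;"
 * consumers can look them up.  The array contents and the caller that fills
 * them are hypothetical.
 */
static struct clk *example_clks[2];	/* filled by hypothetical registration code */

static struct clk_onecell_data example_clk_data = {
	.clks = example_clks,
	.clk_num = ARRAY_SIZE(example_clks),
};

/* after registering example_clks[], a provider would call:
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data);
 */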

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk)) {
			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
					       con_id);

			if (!IS_ERR(clk) && !__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}

int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);

/*
 * This function looks for a parent clock. If there is one, it checks
 * whether the provider for this parent clock has been initialized; in
 * that case the parent clock is ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so an error means that there are no more
		 * parents.  As we didn't exit earlier, the previous
		 * parents are ready; and if there is no clock parent
		 * at all, there is nothing to wait for, so we can
		 * treat its absence as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, trying to follow the
 * dependencies between providers.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent =
			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {
				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * If we didn't manage to initialize any of the
		 * remaining providers during the last loop, now
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
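
/*
 * Editorial example (hedged sketch, not from the original file): an early
 * clock provider hooked into of_clk_init() via CLK_OF_DECLARE from
 * clk-provider.h.  The compatible string and setup body are hypothetical.
 */
static void __init example_clk_setup(struct device_node *np)
{
	/* register clks here, then publish them, e.g.:
	 * of_clk_add_provider(np, of_clk_src_simple_get, clk);
	 */
}
CLK_OF_DECLARE(example_clk, "vendor,example-clk", example_clk_setup);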
#endif