clk.c 76.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

M
Michael Turquette 已提交
12
#include <linux/clk-provider.h>
13
#include <linux/clk/clk-conf.h>
14 15 16 17 18 19
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
G
Grant Likely 已提交
20
#include <linux/of.h>
21
#include <linux/device.h>
22
#include <linux/init.h>
23
#include <linux/sched.h>
24

25 26
#include "clk.h"

27 28 29
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

30 31 32 33 34 35
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

36 37 38 39
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

40 41 42 43 44
static long clk_core_get_accuracy(struct clk_core *core);
static unsigned long clk_core_get_rate(struct clk_core *core);
static int clk_core_get_phase(struct clk_core *core);
static bool clk_core_is_prepared(struct clk_core *core);
static bool clk_core_is_enabled(struct clk_core *core);
45 46
static struct clk_core *clk_core_lookup(const char *name);

M
Michael Turquette 已提交
47 48 49 50 51 52 53 54 55 56 57 58 59
/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
60
	unsigned long		req_rate;
M
Michael Turquette 已提交
61 62 63 64 65 66 67 68 69 70 71
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_node	debug_node;
72
	struct hlist_head	clks;
M
Michael Turquette 已提交
73 74 75 76 77 78 79
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
#endif
	struct kref		ref;
};

80 81 82
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

M
Michael Turquette 已提交
83 84 85 86
struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
87 88
	unsigned long min_rate;
	unsigned long max_rate;
89
	struct hlist_node clks_node;
M
Michael Turquette 已提交
90 91
};

92 93 94
/***           locking             ***/
static void clk_prepare_lock(void)
{
95 96 97 98 99 100 101 102 103 104 105
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
106 107 108 109
}

static void clk_prepare_unlock(void)
{
110 111 112 113 114 115
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
116 117 118 119 120 121
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;
122 123 124 125 126 127 128 129 130 131 132 133

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
134 135 136 137 138
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
139 140 141 142 143 144
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
145 146 147
	spin_unlock_irqrestore(&enable_lock, flags);
}

148 149
/***        debugfs support        ***/

150
#ifdef CONFIG_DEBUG_FS
151 152 153 154
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
155 156
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
157

S
Sachin Kamat 已提交
158 159 160 161 162 163 164 165 166 167 168
static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

169 170
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
171 172 173 174
{
	if (!c)
		return;

175
	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
176 177
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
178 179
		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
		   clk_core_get_accuracy(c), clk_core_get_phase(c));
180 181
}

182
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
183 184
				     int level)
{
185
	struct clk_core *child;
186 187 188 189 190 191

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

192
	hlist_for_each_entry(child, &c->children, child_node)
193 194 195 196 197
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
198
	struct clk_core *c;
199
	struct hlist_head **lists = (struct hlist_head **)s->private;
200

201 202
	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");
203

204
	clk_prepare_lock();
205

206 207 208
	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);
209

210
	clk_prepare_unlock();
211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

228
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
229 230 231 232 233 234 235
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
236 237 238
	seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
239 240
}

241
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
242
{
243
	struct clk_core *child;
244 245 246 247 248 249

	if (!c)
		return;

	clk_dump_one(s, c, level);

250
	hlist_for_each_entry(child, &c->children, child_node) {
251 252 253 254 255 256 257 258 259
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
260
	struct clk_core *c;
261
	bool first_node = true;
262
	struct hlist_head **lists = (struct hlist_head **)s->private;
263 264 265

	seq_printf(s, "{");

266
	clk_prepare_lock();
267

268 269 270 271 272 273 274
	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_puts(s, ",");
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
275 276
	}

277
	clk_prepare_unlock();
278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295

	seq_printf(s, "}");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

296
static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
297 298 299 300
{
	struct dentry *d;
	int ret = -ENOMEM;

301
	if (!core || !pdentry) {
302 303 304 305
		ret = -EINVAL;
		goto out;
	}

306
	d = debugfs_create_dir(core->name, pdentry);
307 308 309
	if (!d)
		goto out;

310
	core->dentry = d;
311

312 313
	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
			(u32 *)&core->rate);
314 315 316
	if (!d)
		goto err_out;

317 318
	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
			(u32 *)&core->accuracy);
319 320 321
	if (!d)
		goto err_out;

322 323
	d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
			(u32 *)&core->phase);
324 325 326
	if (!d)
		goto err_out;

327 328
	d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
			(u32 *)&core->flags);
329 330 331
	if (!d)
		goto err_out;

332 333
	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
			(u32 *)&core->prepare_count);
334 335 336
	if (!d)
		goto err_out;

337 338
	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
			(u32 *)&core->enable_count);
339 340 341
	if (!d)
		goto err_out;

342 343
	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
			(u32 *)&core->notifier_count);
344 345 346
	if (!d)
		goto err_out;

347 348
	if (core->ops->debug_init) {
		ret = core->ops->debug_init(core->hw, core->dentry);
349
		if (ret)
350
			goto err_out;
351
	}
352

353 354 355 356
	ret = 0;
	goto out;

err_out:
357 358
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
359 360 361 362 363 364
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
365
 * @core: the clk being added to the debugfs clk tree
366 367 368 369 370
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
371
static int clk_debug_register(struct clk_core *core)
372 373 374
{
	int ret = 0;

375
	mutex_lock(&clk_debug_lock);
376
	hlist_add_head(&core->debug_node, &clk_debug_list);
377

378
	if (!inited)
379
		goto unlock;
380

381
	ret = clk_debug_create_one(core, rootdir);
382 383
unlock:
	mutex_unlock(&clk_debug_lock);
384 385 386 387

	return ret;
}

S
Sylwester Nawrocki 已提交
388 389
 /**
 * clk_debug_unregister - remove a clk node from the debugfs clk tree
390
 * @core: the clk being removed from the debugfs clk tree
S
Sylwester Nawrocki 已提交
391 392 393 394 395
 *
 * Dynamically removes a clk and all it's children clk nodes from the
 * debugfs clk tree if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 */
396
static void clk_debug_unregister(struct clk_core *core)
S
Sylwester Nawrocki 已提交
397
{
398
	mutex_lock(&clk_debug_lock);
399 400 401
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
402
	mutex_unlock(&clk_debug_lock);
S
Sylwester Nawrocki 已提交
403 404
}

405
struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
406 407 408 409
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

410 411 412
	if (hw->core->dentry)
		d = debugfs_create_file(name, mode, hw->core->dentry, data,
					fops);
413 414 415 416 417

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);

418 419 420 421 422 423 424 425 426 427 428 429 430 431
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus insuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
432
	struct clk_core *core;
433
	struct dentry *d;
434 435 436 437 438 439

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

440
	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
441 442 443 444
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

445
	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
446 447 448 449
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

450 451 452 453
	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;
454

455 456 457
	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
458 459
		return -ENOMEM;

460
	mutex_lock(&clk_debug_lock);
461 462
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);
463 464

	inited = 1;
465
	mutex_unlock(&clk_debug_lock);
466 467 468 469 470

	return 0;
}
late_initcall(clk_debug_init);
#else
471 472
static inline int clk_debug_register(struct clk_core *core) { return 0; }
static inline void clk_debug_reparent(struct clk_core *core,
473
				      struct clk_core *new_parent)
474 475
{
}
476
static inline void clk_debug_unregister(struct clk_core *core)
S
Sylwester Nawrocki 已提交
477 478
{
}
479
#endif
480

481
/* caller must hold prepare_lock */
482
static void clk_unprepare_unused_subtree(struct clk_core *core)
483
{
484
	struct clk_core *child;
485

486 487
	lockdep_assert_held(&prepare_lock);

488
	hlist_for_each_entry(child, &core->children, child_node)
489 490
		clk_unprepare_unused_subtree(child);

491
	if (core->prepare_count)
492 493
		return;

494
	if (core->flags & CLK_IGNORE_UNUSED)
495 496
		return;

497 498 499 500 501 502 503
	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
504
	}
505 506
}

507
/* caller must hold prepare_lock */
508
static void clk_disable_unused_subtree(struct clk_core *core)
509
{
510
	struct clk_core *child;
511 512
	unsigned long flags;

513 514
	lockdep_assert_held(&prepare_lock);

515
	hlist_for_each_entry(child, &core->children, child_node)
516 517
		clk_disable_unused_subtree(child);

518
	flags = clk_enable_lock();
519

520
	if (core->enable_count)
521 522
		goto unlock_out;

523
	if (core->flags & CLK_IGNORE_UNUSED)
524 525
		goto unlock_out;

526 527 528 529 530
	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
531 532 533 534 535 536 537
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
538
	}
539 540

unlock_out:
541
	clk_enable_unlock(flags);
542 543
}

544 545 546 547 548 549 550 551
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

552 553
static int clk_disable_unused(void)
{
554
	struct clk_core *core;
555

556 557 558 559 560
	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

561
	clk_prepare_lock();
562

563 564
	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);
565

566 567
	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);
568

569 570
	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);
571

572 573
	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);
574

575
	clk_prepare_unlock();
576 577 578

	return 0;
}
579
late_initcall_sync(clk_disable_unused);
580 581 582

/***    helper functions   ***/

583
const char *__clk_get_name(struct clk *clk)
584
{
585
	return !clk ? NULL : clk->core->name;
586
}
587
EXPORT_SYMBOL_GPL(__clk_get_name);
588

589
struct clk_hw *__clk_get_hw(struct clk *clk)
590
{
591
	return !clk ? NULL : clk->core->hw;
592
}
593
EXPORT_SYMBOL_GPL(__clk_get_hw);
594

595
u8 __clk_get_num_parents(struct clk *clk)
596
{
597
	return !clk ? 0 : clk->core->num_parents;
598
}
599
EXPORT_SYMBOL_GPL(__clk_get_num_parents);
600

601
struct clk *__clk_get_parent(struct clk *clk)
602
{
603 604 605 606 607
	if (!clk)
		return NULL;

	/* TODO: Create a per-user clk and change callers to call clk_put */
	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
608
}
609
EXPORT_SYMBOL_GPL(__clk_get_parent);
610

611
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
612
							 u8 index)
J
James Hogan 已提交
613
{
614
	if (!core || index >= core->num_parents)
J
James Hogan 已提交
615
		return NULL;
616 617 618 619 620
	else if (!core->parents)
		return clk_core_lookup(core->parent_names[index]);
	else if (!core->parents[index])
		return core->parents[index] =
			clk_core_lookup(core->parent_names[index]);
J
James Hogan 已提交
621
	else
622
		return core->parents[index];
J
James Hogan 已提交
623
}
624 625 626 627 628 629 630 631 632 633 634 635

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	struct clk_core *parent;

	if (!clk)
		return NULL;

	parent = clk_core_get_parent_by_index(clk->core, index);

	return !parent ? NULL : parent->hw->clk;
}
636
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
J
James Hogan 已提交
637

638
unsigned int __clk_get_enable_count(struct clk *clk)
639
{
640
	return !clk ? 0 : clk->core->enable_count;
641 642
}

643
static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
644 645 646
{
	unsigned long ret;

647
	if (!core) {
648
		ret = 0;
649 650 651
		goto out;
	}

652
	ret = core->rate;
653

654
	if (core->flags & CLK_IS_ROOT)
655 656
		goto out;

657
	if (!core->parent)
658
		ret = 0;
659 660 661 662

out:
	return ret;
}
663 664 665 666 667 668 669 670

unsigned long __clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate_nolock(clk->core);
}
671
EXPORT_SYMBOL_GPL(__clk_get_rate);
672

673
static unsigned long __clk_get_accuracy(struct clk_core *core)
674
{
675
	if (!core)
676 677
		return 0;

678
	return core->accuracy;
679 680
}

681
unsigned long __clk_get_flags(struct clk *clk)
682
{
683
	return !clk ? 0 : clk->core->flags;
684
}
685
EXPORT_SYMBOL_GPL(__clk_get_flags);
686

687
static bool clk_core_is_prepared(struct clk_core *core)
688 689 690
{
	int ret;

691
	if (!core)
692 693 694 695 696 697
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
698 699
	if (!core->ops->is_prepared) {
		ret = core->prepare_count ? 1 : 0;
700 701 702
		goto out;
	}

703
	ret = core->ops->is_prepared(core->hw);
704 705 706 707
out:
	return !!ret;
}

708 709 710 711 712 713 714 715
bool __clk_is_prepared(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_prepared(clk->core);
}

716
static bool clk_core_is_enabled(struct clk_core *core)
717 718 719
{
	int ret;

720
	if (!core)
721
		return false;
722 723 724 725 726

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
727 728
	if (!core->ops->is_enabled) {
		ret = core->enable_count ? 1 : 0;
729 730 731
		goto out;
	}

732
	ret = core->ops->is_enabled(core->hw);
733
out:
734
	return !!ret;
735
}
736 737 738 739 740 741 742 743

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
744
EXPORT_SYMBOL_GPL(__clk_is_enabled);
745

746
static struct clk_core *__clk_lookup_subtree(const char *name,
747
					     struct clk_core *core)
748
{
749 750
	struct clk_core *child;
	struct clk_core *ret;
751

752 753
	if (!strcmp(core->name, name))
		return core;
754

755
	hlist_for_each_entry(child, &core->children, child_node) {
756 757 758 759 760 761 762 763
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

764
static struct clk_core *clk_core_lookup(const char *name)
765
{
766 767
	struct clk_core *root_clk;
	struct clk_core *ret;
768 769 770 771 772

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
773
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
774 775 776 777 778 779
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
780
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
781 782 783 784 785 786 787 788
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

789 790
static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			   unsigned long best, unsigned long flags)
791
{
792 793 794 795 796 797 798 799
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

static long
clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
800 801
			     unsigned long min_rate,
			     unsigned long max_rate,
802 803 804
			     unsigned long *best_parent_rate,
			     struct clk_hw **best_parent_p,
			     unsigned long flags)
805
{
806
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
807 808 809 810
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
811 812 813
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT)
814 815
			best = __clk_determine_rate(parent ? parent->hw : NULL,
						    rate, min_rate, max_rate);
816
		else if (parent)
817
			best = clk_core_get_rate_nolock(parent);
818
		else
819
			best = clk_core_get_rate_nolock(core);
820 821 822 823
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
824
	num_parents = core->num_parents;
825
	for (i = 0; i < num_parents; i++) {
826
		parent = clk_core_get_parent_by_index(core, i);
827 828
		if (!parent)
			continue;
829
		if (core->flags & CLK_SET_RATE_PARENT)
830 831 832
			parent_rate = __clk_determine_rate(parent->hw, rate,
							   min_rate,
							   max_rate);
833
		else
834
			parent_rate = clk_core_get_rate_nolock(parent);
835
		if (mux_is_better_rate(rate, parent_rate, best, flags)) {
836 837 838 839 840 841 842
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
843
		*best_parent_p = best_parent->hw;
844 845 846 847
	*best_parent_rate = best;

	return best;
}
848

849 850 851 852 853 854 855
struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

856
static void clk_core_get_boundaries(struct clk_core *core,
857 858 859 860 861 862 863 864
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = 0;
	*max_rate = ULONG_MAX;

865
	hlist_for_each_entry(clk_user, &core->clks, clks_node)
866 867
		*min_rate = max(*min_rate, clk_user->min_rate);

868
	hlist_for_each_entry(clk_user, &core->clks, clks_node)
869 870 871
		*max_rate = min(*max_rate, clk_user->max_rate);
}

872 873 874 875 876 877
/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
878 879
			      unsigned long min_rate,
			      unsigned long max_rate,
880 881 882
			      unsigned long *best_parent_rate,
			      struct clk_hw **best_parent_p)
{
883 884
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
885 886
					    best_parent_p, 0);
}
887
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
888

889
long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
890 891
			      unsigned long min_rate,
			      unsigned long max_rate,
892 893 894
			      unsigned long *best_parent_rate,
			      struct clk_hw **best_parent_p)
{
895 896
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
897 898 899 900 901
					    best_parent_p,
					    CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

902 903
/***        clk api        ***/

904
static void clk_core_unprepare(struct clk_core *core)
905
{
906
	if (!core)
907 908
		return;

909
	if (WARN_ON(core->prepare_count == 0))
910 911
		return;

912
	if (--core->prepare_count > 0)
913 914
		return;

915
	WARN_ON(core->enable_count > 0);
916

917
	trace_clk_unprepare(core);
918

919 920
	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);
921

922 923
	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
924 925 926 927
}

/**
 * clk_unprepare - undo preparation of a clock source
P
Peter Meerwald 已提交
928
 * @clk: the clk being unprepared
929 930 931 932 933 934 935 936 937 938
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2c.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
939 940 941
	if (IS_ERR_OR_NULL(clk))
		return;

942
	clk_prepare_lock();
943
	clk_core_unprepare(clk->core);
944
	clk_prepare_unlock();
945 946 947
}
EXPORT_SYMBOL_GPL(clk_unprepare);

948
static int clk_core_prepare(struct clk_core *core)
949 950 951
{
	int ret = 0;

952
	if (!core)
953 954
		return 0;

955 956
	if (core->prepare_count == 0) {
		ret = clk_core_prepare(core->parent);
957 958 959
		if (ret)
			return ret;

960
		trace_clk_prepare(core);
961

962 963
		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);
964

965
		trace_clk_prepare_complete(core);
966 967

		if (ret) {
968
			clk_core_unprepare(core->parent);
969
			return ret;
970 971 972
		}
	}

973
	core->prepare_count++;
974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2c.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

994 995 996
	if (!clk)
		return 0;

997
	clk_prepare_lock();
998
	ret = clk_core_prepare(clk->core);
999
	clk_prepare_unlock();
1000 1001 1002 1003 1004

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

1005
static void clk_core_disable(struct clk_core *core)
1006
{
1007
	if (!core)
1008 1009
		return;

1010
	if (WARN_ON(core->enable_count == 0))
1011 1012
		return;

1013
	if (--core->enable_count > 0)
1014 1015
		return;

1016
	trace_clk_disable(core);
1017

1018 1019
	if (core->ops->disable)
		core->ops->disable(core->hw);
1020

1021
	trace_clk_disable_complete(core);
1022

1023
	clk_core_disable(core->parent);
1024 1025
}

1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

1042 1043 1044
	if (IS_ERR_OR_NULL(clk))
		return;

1045
	flags = clk_enable_lock();
1046
	clk_core_disable(clk->core);
1047
	clk_enable_unlock(flags);
1048 1049 1050
}
EXPORT_SYMBOL_GPL(clk_disable);

1051
static int clk_core_enable(struct clk_core *core)
1052 1053 1054
{
	int ret = 0;

1055
	if (!core)
1056 1057
		return 0;

1058
	if (WARN_ON(core->prepare_count == 0))
1059 1060
		return -ESHUTDOWN;

1061 1062
	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);
1063 1064 1065 1066

		if (ret)
			return ret;

1067
		trace_clk_enable(core);
1068

1069 1070
		if (core->ops->enable)
			ret = core->ops->enable(core->hw);
1071

1072
		trace_clk_enable_complete(core);
1073 1074

		if (ret) {
1075
			clk_core_disable(core->parent);
1076
			return ret;
1077 1078 1079
		}
	}

1080
	core->enable_count++;
1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

1102 1103 1104
	if (!clk)
		return 0;

1105
	flags = clk_enable_lock();
1106
	ret = clk_core_enable(clk->core);
1107
	clk_enable_unlock(flags);
1108 1109 1110 1111 1112

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

1113
static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
1114 1115 1116
						unsigned long rate,
						unsigned long min_rate,
						unsigned long max_rate)
1117
{
1118
	unsigned long parent_rate = 0;
1119
	struct clk_core *parent;
1120
	struct clk_hw *parent_hw;
1121

1122 1123
	lockdep_assert_held(&prepare_lock);

1124
	if (!core)
1125
		return 0;
1126

1127
	parent = core->parent;
1128 1129 1130
	if (parent)
		parent_rate = parent->rate;

1131
	if (core->ops->determine_rate) {
1132
		parent_hw = parent ? parent->hw : NULL;
1133
		return core->ops->determine_rate(core->hw, rate,
1134 1135
						min_rate, max_rate,
						&parent_rate, &parent_hw);
1136 1137 1138 1139
	} else if (core->ops->round_rate)
		return core->ops->round_rate(core->hw, rate, &parent_rate);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, rate, min_rate,
1140
						  max_rate);
1141
	else
1142
		return core->rate;
1143
}
1144

1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @rate: target rate
 * @min_rate: returned rate must be greater than this rate
 * @max_rate: returned rate must be less than this rate
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
 * .determine_rate.
 */
unsigned long __clk_determine_rate(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long min_rate,
				   unsigned long max_rate)
{
	if (!hw)
		return 0;

	return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

1167 1168 1169 1170 1171 1172 1173 1174 1175
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
1176 1177 1178
	unsigned long min_rate;
	unsigned long max_rate;

1179 1180 1181
	if (!clk)
		return 0;

1182 1183 1184
	clk_core_get_boundaries(clk->core, &min_rate, &max_rate);

	return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
1185
}
1186
EXPORT_SYMBOL_GPL(__clk_round_rate);
1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

1201 1202 1203
	if (!clk)
		return 0;

1204
	clk_prepare_lock();
1205
	ret = __clk_round_rate(clk, rate);
1206
	clk_prepare_unlock();
1207 1208 1209 1210 1211 1212 1213

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
1214
 * @core: clk that is changing rate
1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
1226
static int __clk_notify(struct clk_core *core, unsigned long msg,
1227 1228 1229 1230 1231 1232 1233 1234 1235 1236
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
1237
		if (cn->clk->core == core) {
1238
			cnd.clk = cn->clk;
1239 1240 1241 1242 1243 1244 1245 1246
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
		}
	}

	return ret;
}

1247 1248
/**
 * __clk_recalc_accuracies
1249
 * @core: first clk in the subtree
1250 1251 1252 1253 1254 1255 1256 1257
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of it's
 * parent.
 *
 * Caller must hold prepare_lock.
 */
1258
static void __clk_recalc_accuracies(struct clk_core *core)
1259 1260
{
	unsigned long parent_accuracy = 0;
1261
	struct clk_core *child;
1262

1263 1264
	lockdep_assert_held(&prepare_lock);

1265 1266
	if (core->parent)
		parent_accuracy = core->parent->accuracy;
1267

1268 1269
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
1270 1271
							  parent_accuracy);
	else
1272
		core->accuracy = parent_accuracy;
1273

1274
	hlist_for_each_entry(child, &core->children, child_node)
1275 1276 1277
		__clk_recalc_accuracies(child);
}

1278
static long clk_core_get_accuracy(struct clk_core *core)
1279 1280 1281 1282
{
	unsigned long accuracy;

	clk_prepare_lock();
1283 1284
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);
1285

1286
	accuracy = __clk_get_accuracy(core);
1287 1288 1289 1290 1291
	clk_prepare_unlock();

	return accuracy;
}

1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
1303 1304
	if (!clk)
		return 0;
1305

1306
	return clk_core_get_accuracy(clk->core);
1307 1308 1309
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

1310
static unsigned long clk_recalc(struct clk_core *core,
1311
				unsigned long parent_rate)
1312
{
1313 1314
	if (core->ops->recalc_rate)
		return core->ops->recalc_rate(core->hw, parent_rate);
1315 1316 1317
	return parent_rate;
}

1318 1319
/**
 * __clk_recalc_rates
1320
 * @core: first clk in the subtree
1321 1322 1323 1324
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
P
Peter Meerwald 已提交
1325
 * it is assumed that the clock will take on the rate of its parent.
1326 1327 1328 1329 1330 1331
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
1332
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1333 1334 1335
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
1336
	struct clk_core *child;
1337

1338 1339
	lockdep_assert_held(&prepare_lock);

1340
	old_rate = core->rate;
1341

1342 1343
	if (core->parent)
		parent_rate = core->parent->rate;
1344

1345
	core->rate = clk_recalc(core, parent_rate);
1346 1347 1348 1349 1350

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
1351 1352
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);
1353

1354
	hlist_for_each_entry(child, &core->children, child_node)
1355 1356 1357
		__clk_recalc_rates(child, msg);
}

1358
static unsigned long clk_core_get_rate(struct clk_core *core)
1359 1360 1361
{
	unsigned long rate;

1362
	clk_prepare_lock();
1363

1364 1365
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);
1366

1367
	rate = clk_core_get_rate_nolock(core);
1368
	clk_prepare_unlock();
1369 1370 1371

	return rate;
}
1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
1388 1389
EXPORT_SYMBOL_GPL(clk_get_rate);

1390
static int clk_fetch_parent_index(struct clk_core *core,
1391
				  struct clk_core *parent)
1392
{
1393
	int i;
1394

1395 1396
	if (!core->parents) {
		core->parents = kcalloc(core->num_parents,
1397
					sizeof(struct clk *), GFP_KERNEL);
1398
		if (!core->parents)
1399 1400
			return -ENOMEM;
	}
1401 1402 1403 1404

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
1405
	 * them now to avoid future calls to clk_core_lookup.
1406
	 */
1407 1408
	for (i = 0; i < core->num_parents; i++) {
		if (core->parents[i] == parent)
1409
			return i;
1410

1411
		if (core->parents[i])
1412 1413
			continue;

1414 1415
		if (!strcmp(core->parent_names[i], parent->name)) {
			core->parents[i] = clk_core_lookup(parent->name);
1416
			return i;
1417 1418 1419
		}
	}

1420
	return -EINVAL;
1421 1422
}

1423
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1424
{
1425
	hlist_del(&core->child_node);
1426

1427 1428
	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
1429
		if (new_parent->new_child == core)
1430 1431
			new_parent->new_child = NULL;

1432
		hlist_add_head(&core->child_node, &new_parent->children);
1433
	} else {
1434
		hlist_add_head(&core->child_node, &clk_orphan_list);
1435
	}
1436

1437
	core->parent = new_parent;
1438 1439
}

1440
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1441
					   struct clk_core *parent)
1442 1443
{
	unsigned long flags;
1444
	struct clk_core *old_parent = core->parent;
1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
1463
	if (core->prepare_count) {
1464 1465
		clk_core_prepare(parent);
		clk_core_enable(parent);
1466
		clk_core_enable(core);
1467 1468 1469 1470
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
1471
	clk_reparent(core, parent);
1472 1473
	clk_enable_unlock(flags);

S
Stephen Boyd 已提交
1474 1475 1476
	return old_parent;
}

1477 1478 1479
static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
S
Stephen Boyd 已提交
1480 1481 1482 1483 1484
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
1485 1486 1487 1488
	if (core->prepare_count) {
		clk_core_disable(core);
		clk_core_disable(old_parent);
		clk_core_unprepare(old_parent);
S
Stephen Boyd 已提交
1489 1490 1491
	}
}

1492
static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1493
			    u8 p_index)
S
Stephen Boyd 已提交
1494 1495 1496
{
	unsigned long flags;
	int ret = 0;
1497
	struct clk_core *old_parent;
S
Stephen Boyd 已提交
1498

1499
	old_parent = __clk_set_parent_before(core, parent);
S
Stephen Boyd 已提交
1500

1501
	trace_clk_set_parent(core, parent);
1502

1503
	/* change clock input source */
1504 1505
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);
1506

1507
	trace_clk_set_parent_complete(core, parent);
1508

1509 1510
	if (ret) {
		flags = clk_enable_lock();
1511
		clk_reparent(core, old_parent);
1512 1513
		clk_enable_unlock(flags);

1514 1515
		if (core->prepare_count) {
			clk_core_disable(core);
1516 1517
			clk_core_disable(parent);
			clk_core_unprepare(parent);
1518 1519 1520 1521
		}
		return ret;
	}

1522
	__clk_set_parent_after(core, parent, old_parent);
1523 1524 1525 1526

	return 0;
}

1527 1528
/**
 * __clk_speculate_rates
1529
 * @core: first clk in the subtree
1530 1531 1532 1533 1534 1535 1536 1537 1538
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
P
Peter Meerwald 已提交
1539
 * take on the rate of its parent.
1540 1541 1542
 *
 * Caller must hold prepare_lock.
 */
1543
static int __clk_speculate_rates(struct clk_core *core,
1544
				 unsigned long parent_rate)
1545
{
1546
	struct clk_core *child;
1547 1548 1549
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

1550 1551
	lockdep_assert_held(&prepare_lock);

1552
	new_rate = clk_recalc(core, parent_rate);
1553

1554
	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1555 1556
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1557

1558 1559
	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1560
				__func__, core->name, ret);
1561
		goto out;
1562
	}
1563

1564
	hlist_for_each_entry(child, &core->children, child_node) {
1565
		ret = __clk_speculate_rates(child, new_rate);
1566
		if (ret & NOTIFY_STOP_MASK)
1567 1568 1569 1570 1571 1572 1573
			break;
	}

out:
	return ret;
}

1574
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1575
			     struct clk_core *new_parent, u8 p_index)
1576
{
1577
	struct clk_core *child;
1578

1579 1580 1581
	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
1582
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
1583 1584 1585
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;
1586

1587
	hlist_for_each_entry(child, &core->children, child_node) {
1588
		child->new_rate = clk_recalc(child, new_rate);
1589
		clk_calc_subtree(child, child->new_rate, NULL, 0);
1590 1591 1592 1593 1594 1595 1596
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
1597
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1598
					   unsigned long rate)
1599
{
1600
	struct clk_core *top = core;
1601
	struct clk_core *old_parent, *parent;
1602
	struct clk_hw *parent_hw;
1603
	unsigned long best_parent_rate = 0;
1604
	unsigned long new_rate;
1605 1606
	unsigned long min_rate;
	unsigned long max_rate;
1607
	int p_index = 0;
1608
	long ret;
1609

1610
	/* sanity */
1611
	if (IS_ERR_OR_NULL(core))
1612 1613
		return NULL;

1614
	/* save parent rate, if it exists */
1615
	parent = old_parent = core->parent;
1616 1617 1618
	if (parent)
		best_parent_rate = parent->rate;

1619
	clk_core_get_boundaries(core, &min_rate, &max_rate);
1620

1621
	/* find the closest rate and parent clk/rate */
1622
	if (core->ops->determine_rate) {
1623
		parent_hw = parent ? parent->hw : NULL;
1624
		ret = core->ops->determine_rate(core->hw, rate,
1625 1626 1627 1628 1629 1630 1631 1632
					       min_rate,
					       max_rate,
					       &best_parent_rate,
					       &parent_hw);
		if (ret < 0)
			return NULL;

		new_rate = ret;
1633
		parent = parent_hw ? parent_hw->core : NULL;
1634 1635
	} else if (core->ops->round_rate) {
		ret = core->ops->round_rate(core->hw, rate,
1636 1637 1638 1639 1640
					   &best_parent_rate);
		if (ret < 0)
			return NULL;

		new_rate = ret;
1641 1642
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
1643
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1644
		/* pass-through clock without adjustable parent */
1645
		core->new_rate = core->rate;
1646 1647 1648 1649 1650
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
1651
		goto out;
1652 1653
	}

1654 1655
	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
1656
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1657
		pr_debug("%s: %s not gated but wants to reparent\n",
1658
			 __func__, core->name);
1659 1660 1661
		return NULL;
	}

1662
	/* try finding the new parent index */
1663 1664
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
1665
		if (p_index < 0) {
1666
			pr_debug("%s: clk %s can not be parent of clk %s\n",
1667
				 __func__, parent->name, core->name);
1668 1669
			return NULL;
		}
1670 1671
	}

1672
	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1673 1674
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);
1675 1676

out:
1677
	clk_calc_subtree(core, new_rate, parent, p_index);
1678 1679 1680 1681 1682 1683 1684 1685 1686

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
1687
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1688
						  unsigned long event)
1689
{
1690
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1691 1692
	int ret = NOTIFY_DONE;

1693
	if (core->rate == core->new_rate)
1694
		return NULL;
1695

1696 1697
	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
1698
		if (ret & NOTIFY_STOP_MASK)
1699
			fail_clk = core;
1700 1701
	}

1702
	hlist_for_each_entry(child, &core->children, child_node) {
1703
		/* Skip children who will be reparented to another clock */
1704
		if (child->new_parent && child->new_parent != core)
1705 1706 1707 1708 1709 1710
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

1711 1712 1713
	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
1714 1715
		if (tmp_clk)
			fail_clk = tmp_clk;
1716 1717 1718 1719 1720 1721 1722 1723 1724
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
1725
static void clk_change_rate(struct clk_core *core)
1726
{
1727
	struct clk_core *child;
1728
	struct hlist_node *tmp;
1729
	unsigned long old_rate;
1730
	unsigned long best_parent_rate = 0;
S
Stephen Boyd 已提交
1731
	bool skip_set_rate = false;
1732
	struct clk_core *old_parent;
1733

1734
	old_rate = core->rate;
1735

1736 1737 1738 1739
	if (core->new_parent)
		best_parent_rate = core->new_parent->rate;
	else if (core->parent)
		best_parent_rate = core->parent->rate;
1740

1741 1742 1743
	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);
S
Stephen Boyd 已提交
1744

1745
		if (core->ops->set_rate_and_parent) {
S
Stephen Boyd 已提交
1746
			skip_set_rate = true;
1747
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
S
Stephen Boyd 已提交
1748
					best_parent_rate,
1749 1750 1751
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
S
Stephen Boyd 已提交
1752 1753
		}

1754 1755
		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
S
Stephen Boyd 已提交
1756 1757
	}

1758
	trace_clk_set_rate(core, core->new_rate);
1759

1760 1761
	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
1762

1763
	trace_clk_set_rate_complete(core, core->new_rate);
1764

1765
	core->rate = clk_recalc(core, best_parent_rate);
1766

1767 1768
	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
1769

1770 1771 1772 1773
	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
1774
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
1775
		/* Skip children who will be reparented to another clock */
1776
		if (child->new_parent && child->new_parent != core)
1777
			continue;
1778
		clk_change_rate(child);
1779 1780
	}

1781 1782 1783
	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);
1784 1785
}

1786
static int clk_core_set_rate_nolock(struct clk_core *core,
1787 1788 1789 1790 1791 1792
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate = req_rate;
	int ret = 0;

1793
	if (!core)
1794 1795 1796
		return 0;

	/* bail early if nothing to do */
1797
	if (rate == clk_core_get_rate_nolock(core))
1798 1799
		return 0;

1800
	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1801 1802 1803
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
1804
	top = clk_calc_new_rates(core, rate);
1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819
	if (!top)
		return -EINVAL;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		return -EBUSY;
	}

	/* change the rates */
	clk_change_rate(top);

1820
	core->req_rate = req_rate;
1821 1822 1823 1824

	return ret;
}

1825 1826 1827 1828 1829
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
1830
 * In the simplest case clk_set_rate will only adjust the rate of clk.
1831
 *
1832 1833 1834 1835 1836
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
P
Peter Meerwald 已提交
1837
 * up to clk's parent and set its rate.  Upward propagation will continue
1838 1839
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
1840
 *
1841 1842
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1843 1844 1845 1846 1847
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
1848
	int ret;
1849

1850 1851 1852
	if (!clk)
		return 0;

1853
	/* prevent racing with updates to the clock topology */
1854
	clk_prepare_lock();
1855

1856
	ret = clk_core_set_rate_nolock(clk->core, rate);
1857

1858
	clk_prepare_unlock();
1859

1860 1861 1862
	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
1863

1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
1884 1885
	}

1886 1887 1888 1889 1890 1891 1892
	clk_prepare_lock();

	if (min != clk->min_rate || max != clk->max_rate) {
		clk->min_rate = min;
		clk->max_rate = max;
		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
	}
1893

1894
	clk_prepare_unlock();
1895 1896 1897

	return ret;
}
1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

1942
	clk_prepare_lock();
1943
	parent = __clk_get_parent(clk);
1944
	clk_prepare_unlock();
1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree.
 */
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	struct clk_core *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!core->num_parents)
		goto out;

	if (core->num_parents == 1) {
		if (IS_ERR_OR_NULL(core->parent))
			core->parent = clk_core_lookup(core->parent_names[0]);
		ret = core->parent;
		goto out;
	}

	if (!core->ops->get_parent) {
		WARN(1,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in core->parents.  This prevents
	 * unnecessary and expensive lookups.  We don't set core->parent here;
	 * that is done by the calling function.
	 */

	index = core->ops->get_parent(core->hw);

	if (!core->parents)
		core->parents =
			kcalloc(core->num_parents, sizeof(*core->parents),
					GFP_KERNEL);

	ret = clk_core_get_parent_by_index(core, index);

out:
	return ret;
}

static void clk_core_reparent(struct clk_core *core,
				  struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);
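
/*
 * Example: validating a candidate parent before re-parenting.  An
 * illustrative sketch only; "mux" and "pll_b" are hypothetical consumer
 * handles obtained via clk_get():
 *
 *	if (clk_has_parent(mux, pll_b))
 *		ret = clk_set_parent(mux, pll_b);
 */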

static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!core)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (core->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s cannot be parent of clk %s\n",
					__func__, parent->name, core->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

out:
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc.), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk)
		return 0;

	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
}
EXPORT_SYMBOL_GPL(clk_set_parent);
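
/*
 * Example: switching a mux between two inputs, falling back to a fixed
 * oscillator when the PLL is rejected.  An illustrative sketch only;
 * "audio_mux", "pll_audio" and "osc24m" are hypothetical handles:
 *
 *	ret = clk_set_parent(audio_mux, pll_audio);
 *	if (ret)
 *		ret = clk_set_parent(audio_mux, osc24m);
 */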

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified number of degrees.
 * Returns 0 on success, or a negative error code otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example, with
 * phase-locked-loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally, the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	trace_clk_set_phase(clk->core, degrees);

	if (clk->core->ops->set_phase)
		ret = clk->core->ops->set_phase(clk->core->hw, degrees);

	trace_clk_set_phase_complete(clk->core, degrees);

	if (!ret)
		clk->core->phase = degrees;

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
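
/*
 * Example: shifting a sample clock by a quarter period and reading the
 * result back.  An illustrative sketch only; the "sample" handle is
 * hypothetical:
 *
 *	ret = clk_set_phase(sample, 90);
 *	if (!ret && clk_get_phase(sample) != 90)
 *		dev_warn(dev, "phase readback mismatch\n");
 */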

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative error code.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
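
/*
 * Example: using clk_is_match() to skip a redundant re-parent.  An
 * illustrative sketch only; "mux" and "pll" are hypothetical handles:
 *
 *	if (clk_is_match(clk_get_parent(mux), pll))
 *		return 0;
 *	return clk_set_parent(mux, pll);
 */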

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:	device initializing this clk, placeholder for now
 * @clk_user:	clk being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_init(struct device *dev, struct clk *clk_user)
{
	int i, ret = 0;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	struct clk_core *core;
	unsigned long rate;

	if (!clk_user)
		return -EINVAL;

	core = clk_user->core;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_warn("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_warn("%s: %s must implement .get_parent & .set_parent\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
			!(core->ops->set_parent && core->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < core->num_parents; i++)
		WARN(!core->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to core->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If core->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize core->parents.
	 */
	if (core->num_parents > 1 && !core->parents) {
		core->parents = kcalloc(core->num_parents,
					sizeof(*core->parents), GFP_KERNEL);
		/*
		 * clk_core_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (core->parents)
			for (i = 0; i < core->num_parents; i++)
				core->parents[i] =
					clk_core_lookup(core->parent_names[i]);
	}

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent)
		hlist_add_head(&core->child_node,
				&core->parent->children);
	else if (core->flags & CLK_IS_ROOT)
		hlist_add_head(&core->child_node, &clk_root_list);
	else
		hlist_add_head(&core->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(core->name, orphan->parent_names[i]))
				clk_core_reparent(orphan, core);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(core->name, orphan->parent_names[i])) {
				clk_core_reparent(orphan, core);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	kref_init(&core->ref);
out:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(core);

	return ret;
}

struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (!hw || IS_ERR(hw))
		return (struct clk *) hw;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = con_id;
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree(clk);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}
	core->ops = hw->init->ops;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	INIT_HLIST_HEAD(&core->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parent_names_copy;
	}

	ret = __clk_init(dev, hw->clk);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
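
/*
 * Example: a provider registering one clock with the core.  An
 * illustrative sketch only; "foo_gate_ops", the names and the foo driver
 * structure are hypothetical:
 *
 *	static const char *foo_parents[] = { "osc24m" };
 *	struct clk_init_data init = {
 *		.name = "foo_gate",
 *		.ops = &foo_gate_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *	};
 *	struct clk *clk;
 *
 *	foo->hw.init = &init;
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */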

/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
 */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);
	int i = core->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(core->parents);
	while (--i >= 0)
		kfree_const(core->parent_names[i]);

	kfree(core->parent_names);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);

unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
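
/*
 * Example: the managed variant of the clk_register() sketch above; the
 * clock is unregistered automatically on driver detach, so remove() needs
 * no clk_unregister() call ("foo" remains hypothetical):
 *
 *	foo->hw.init = &init;
 *	clk = devm_clk_register(&pdev->dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */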

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter the clk framework by calling any top-level clk APIs;
 * this would cause nested locking of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
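
/*
 * Example: reacting to rate changes of a consumer clock.  An illustrative
 * sketch only; the callback body and the "foo" names are hypothetical:
 *
 *	static int foo_clk_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			foo_reprogram_dividers(ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_clk_cb };
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */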

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				     void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
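
/*
 * Example: exporting a block of clocks to DT consumers with the onecell
 * helper above.  An illustrative sketch only; the "foo" names and
 * NR_FOO_CLKS are hypothetical:
 *
 *	static struct clk *foo_clks[NR_FOO_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */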

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk)) {
			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
					       con_id);

			if (!IS_ERR(clk) && !__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; an input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}

int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
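
/*
 * Example: building a mux's parent-name table from the DT "clocks"
 * property.  An illustrative sketch only; the two-entry bound is
 * hypothetical:
 *
 *	const char *parents[2];
 *	int i, nr = min(of_clk_get_parent_count(np), 2);
 *
 *	for (i = 0; i < nr; i++)
 *		parents[i] = of_clk_get_parent_name(np, i);
 */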

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized; in
 * that case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so an error means that there are no more
		 * parents. As we didn't exit yet, the previous parents
		 * are ready. If there are no clock parents at all, we
		 * need not wait for them and can consider their absence
		 * as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, following the dependency
 * order between providers where possible.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clock providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent =
			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {
				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * If we didn't manage to initialize any of the
		 * remaining providers during the last loop, we now
		 * initialize all the remaining ones unconditionally
		 * in case a clock parent was not mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif