/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 *  With clkdev bits:
 *
 *	Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <asm/clock.h>
#include <asm/machvec.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
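
/*
 * Usage sketch (hypothetical; the divisor values and table sizes below
 * are illustrative, not taken from any real CPU support code):
 *
 *	static unsigned int divisors[] = { 1, 2, 4, 8 };
 *	static struct clk_div_mult_table div_table = {
 *		.divisors	= divisors,
 *		.nr_divisors	= ARRAY_SIZE(divisors),
 *	};
 *	static struct cpufreq_frequency_table freq_table[5];
 *
 *	clk_rate_table_build(clk, freq_table, ARRAY_SIZE(divisors),
 *			     &div_table, NULL);
 *
 * Note that freq_table needs room for nr_freqs entries plus the
 * CPUFREQ_TABLE_END terminator appended above.
 */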

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rate;
	unsigned long highest, lowest;
	int i;

	highest = 0;
	lowest = ~0UL;	/* start high so the first valid entry becomes the lowest */

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rate >= highest)
		rate_best_fit = highest;
	if (rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}
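
/*
 * Worked example (illustrative values): given valid table entries of
 * { 100, 200, 400 }, a requested rate of 250 rounds to 200 (the entry
 * with the smallest error), 500 clamps to the highest entry (400), and
 * 50 clamps to the lowest entry (100).
 */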

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}
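
/*
 * Illustrative consequence of the recursion above: when a PLL clock's
 * rate changes, propagate_rate(pll) revisits each child's ->recalc()
 * with the parent rate already updated, so a divider hanging off the
 * PLL picks up the new input rate before its own children are walked.
 */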

static void __clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
	}

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
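
/*
 * Typical consumer pattern (an illustrative sketch; the connection ID
 * "peripheral_clk" is hypothetical and error handling is abbreviated):
 *
 *	struct clk *clk = clk_get(&pdev->dev, "peripheral_clk");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_enable(clk);
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 *
 * Enable/disable calls nest: the hardware is only touched on the
 * 0 -> 1 and 1 -> 0 usecount transitions, and a parent is enabled
 * before its child and disabled after it.
 */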

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
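
/*
 * Registration sketch (hypothetical clock definition; the names and
 * field values are illustrative only):
 *
 *	static struct clk module_clk = {
 *		.name	= "module_clk",
 *		.parent	= &bus_clk,
 *		.ops	= &module_clk_ops,
 *		.flags	= CLK_ENABLE_ON_INIT,
 *	};
 *
 *	clk_register(&module_clk);
 *
 * Registration links the clock under its parent (or onto the root
 * list), adds it to the global clock_list, and invokes ->init() if
 * one is provided.
 */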

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
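
/*
 * clk_set_rate() above is simply clk_set_rate_ex() with an algo_id of
 * zero; algorithm IDs are interpreted by the individual ->set_rate()
 * implementations (see the NO_CHANGE use in the PM resume path below).
 */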

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops && clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
				 clk->name, clk->parent->name, clk->rate);
			if (clk->ops && clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match.
 *  If an entry has a connection ID, it must match.
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
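
/*
 * Scoring example (hypothetical lookup entries): for a lookup of
 * ("sh-sci.0", "peripheral_clk"), an entry matching only the device ID
 * scores 2, one matching only the connection ID scores 1, and one
 * matching both scores 3, so the dev+con entry's clock wins.
 */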

struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	struct clk *clk;

	mutex_lock(&clock_list_sem);
	clk = clk_find(dev_id, con_id);
	mutex_unlock(&clock_list_sem);

	return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(clk_get_sys);

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	clk = clk_get_sys(dev_id, id);
	if (clk && !IS_ERR(clk))
		return clk;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
	int ret;

	ret = arch_clk_init();
	if (unlikely(ret)) {
		pr_err("%s: CPU clock registration failed.\n", __func__);
		return ret;
	}

	if (sh_mv.mv_clk_init) {
		ret = sh_mv.mv_clk_init();
		if (unlikely(ret)) {
			pr_err("%s: machvec clock initialization failed.\n",
			       __func__);
			return ret;
		}
	}

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}

/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
	if (c->id >= 0)
		sprintf(p, ":%d", c->id);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}
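
/*
 * The resulting debugfs layout mirrors the clock tree, e.g. with
 * hypothetical clock names:
 *
 *	/sys/kernel/debug/clock/
 *		extal/
 *			usecount  rate  flags
 *			pll_clk/
 *				usecount  rate  flags
 *				cpu_clk/
 *					...
 */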

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
	return err;
}
late_initcall(clk_debugfs_init);